diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index be32c474..57dac68b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -7,13 +7,13 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.18" + go-version: "1.21" - uses: actions/checkout@v2 with: fetch-depth: 0 - name: golangci-lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 with: skip-go-installation: true - version: v1.46.2 - args: --timeout=10m + version: v1.52.0 + args: --timeout=10m \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 717deb88..90349cfd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -14,17 +14,17 @@ jobs: fetch-depth: 0 - uses: actions/setup-go@v2 with: - go-version: "1.18" + go-version: "1.21" - name: Login to DockerHub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v2.6.1 + uses: goreleaser/goreleaser-action@v4.3.0 with: distribution: goreleaser - version: latest - args: --rm-dist + version: v1.18.2 + args: --clean env: GITHUB_TOKEN: ${{ secrets.GO_RELEASER_TOKEN }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 642fbb6a..44d87632 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -8,6 +8,24 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: "1.18" + go-version: "1.21" - name: run tests run: make test + e2e-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-go@v2 + with: + go-version: "1.21" + - name: run e2e-tests + run: make e2e-test + benchmark: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-go@v2 + with: + go-version: "1.21" + - name: run benchmark + run: make benchmark diff --git a/.gitignore b/.gitignore index 38bb345a..a460164f 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,9 @@ tmp/ expt/ -entropy.dev.yaml - -requests.http \ No newline at end of file +entropy*.dev.yaml +entropy*.dev.yml +test_*.yml +test_*.json +requests.http +entropy.yaml \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index de8a79e7..a63cd015 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,5 @@ run: - go: '1.17' + go: '1.21' timeout: 10m skip-files: - expt/main.go @@ -8,44 +8,46 @@ output: format: colored-line-number linters: - enable-all: true - disable: - - exhaustruct - - cyclop - - exhaustive - - exhaustivestruct - - exhaustruct - - funlen - - gochecknoglobals + disable-all: true + enable-all: false + enable: + - bodyclose + - depguard + - dogsled + - dupl + - errcheck + - exportloopref + - gci + - gochecknoinits + - goconst + - gocritic - gocyclo - - godox - - gofumpt - - golint - - interfacer - - lll - - maintidx - - ireturn - - maligned - - nlreturn + - gofmt + - goimports + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - decorder + - ineffassign + - misspell + - nakedret + - noctx - nolintlint - - prealloc - - promlinter - - scopelint - - tagliatelle - - testpackage - - paralleltest - - tparallel - - containedctx - - varnamelen - - wrapcheck - - wsl - + - revive + - staticcheck + - stylecheck + - unconvert + - unparam + - unused + - whitespace linters-settings: decorder: dec-order: - - type - const - var + - type - func disable-dec-order-check: false disable-init-func-first-check: false @@ 
-112,12 +114,12 @@ linters-settings: checks: - "all" goimports: - local-prefixes: github.com/odpf/entropy + local-prefixes: github.com/goto/entropy gci: sections: - standard # Captures all standard packages if they do not match another section. - default # Contains all imports that could not be matched to another section type. - - prefix(github.com/odpf/entropy) # Groups all imports with the specified Prefix. + - prefix(github.com/goto/entropy) # Groups all imports with the specified Prefix. gocritic: disabled-checks: - ifElseChain @@ -143,5 +145,8 @@ issues: linters: - forbidigo - contextcheck + - path: / + linters: + - typecheck severity: default-severity: error diff --git a/.goreleaser.yml b/.goreleaser.yml index ab95a0cd..088acbc4 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -10,9 +10,9 @@ builds: binary: entropy flags: [-a] ldflags: - - -X github.com/odpf/entropy/pkg/version.Version={{.Tag}} - - -X github.com/odpf/entropy/pkg/version.Commit={{.FullCommit}} - - -X github.com/odpf/entropy/pkg/version.BuildTime={{.Date}} + - -X github.com/goto/entropy/pkg/version.Version={{.Tag}} + - -X github.com/goto/entropy/pkg/version.Commit={{.FullCommit}} + - -X github.com/goto/entropy/pkg/version.BuildTime={{.Date}} goos: - darwin - linux @@ -58,16 +58,16 @@ dockers: - entropy dockerfile: Dockerfile image_templates: - - 'docker.io/odpf/{{.ProjectName}}:latest' - - 'docker.io/odpf/{{.ProjectName}}:{{ .Version }}' - - 'docker.io/odpf/{{.ProjectName}}:{{ .Version }}-amd64' + - 'docker.io/gotocompany/{{.ProjectName}}:latest' + - 'docker.io/gotocompany/{{.ProjectName}}:{{ .Version }}' + - 'docker.io/gotocompany/{{.ProjectName}}:{{ .Version }}-amd64' brews: - name: entropy - homepage: "https://github.com/odpf/entropy" + homepage: "https://github.com/goto/entropy" description: "Infrastructure orchestration tool." tap: - owner: odpf + owner: goto name: homebrew-tap license: "Apache 2.0" folder: Formula @@ -76,5 +76,5 @@ brews: install: |- bin.install "entropy" commit_author: - name: Rohil Surana - email: rohilsurana96@gmail.com + name: github-actions[bot] + email: 41898282+github-actions[bot]@users.noreply.github.com diff --git a/Makefile b/Makefile index afaf4975..b33fb0b6 100644 --- a/Makefile +++ b/Makefile @@ -1,22 +1,31 @@ -NAME=github.com/odpf/entropy +NAME=github.com/goto/entropy VERSION=$(shell git describe --tags --always --first-parent 2>/dev/null) COMMIT=$(shell git rev-parse --short HEAD) +PROTON_COMMIT="6c5bc2b621abe2812cc8288a5f6363570bab911a" BUILD_TIME=$(shell date) COVERAGE_DIR=coverage BUILD_DIR=dist EXE=entropy -.PHONY: all build clean tidy format test test-coverage +.PHONY: all build clean tidy format test test-coverage proto -all: clean test build format lint +all: format clean test build tidy: @echo "Tidy up go.mod..." @go mod tidy -v -install: - @echo "Installing Entropy to ${GOBIN}..." 
- @go install +install: ## install required dependencies + @echo "> installing dependencies" + go install github.com/vektra/mockery/v2@latest + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1 + go get -d google.golang.org/protobuf/proto@v1.28.1 + go get -d google.golang.org/grpc@v1.49.0 + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2.0 + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@v2.11.3 + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@v2.11.3 + go install github.com/bufbuild/buf/cmd/buf@v1.7.0 + go install github.com/envoyproxy/protoc-gen-validate@v0.6.7 format: @echo "Running gofumpt..." @@ -29,6 +38,8 @@ lint: clean: tidy @echo "Cleaning up build directories..." @rm -rf ${COVERAGE_DIR} ${BUILD_DIR} + +generate: @echo "Running go-generate..." @go generate ./... @@ -37,15 +48,30 @@ test: tidy @echo "Running unit tests..." @go test ./... -coverprofile=${COVERAGE_DIR}/coverage.out +e2e-test: tidy + @echo "Running e2e-test tests..." + @go test -v ./test/e2e_test/... -timeout 20m + test-coverage: test @echo "Generating coverage report..." @go tool cover -html=${COVERAGE_DIR}/coverage.out +benchmark: tidy + @echo "Running benchmark tests..." + @go test ./... -bench=. -benchmem + build: clean @mkdir -p ${BUILD_DIR} @echo "Running build for '${VERSION}' in '${BUILD_DIR}/'..." @CGO_ENABLED=0 go build -ldflags '-X "${NAME}/pkg/version.Version=${VERSION}" -X "${NAME}/pkg/version.Commit=${COMMIT}" -X "${NAME}/pkg/version.BuildTime=${BUILD_TIME}"' -o ${BUILD_DIR}/${EXE} +proto: ## Generate the protobuf files + @echo " > generating protobuf from goto/proton" + @echo " > [info] make sure correct version of dependencies are installed using 'make install'" + @rm -rf ./proto + @buf generate https://github.com/goto/proton/archive/${PROTON_COMMIT}.zip#strip_components=1 --template buf.gen.yaml --path gotocompany/entropy --path gotocompany/common + @echo " > protobuf compilation finished" + download: @go mod download diff --git a/README.md b/README.md index 335f03f6..6ee19413 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # Entropy -![test workflow](https://github.com/odpf/entropy/actions/workflows/test.yml/badge.svg) -[![Go Report Card](https://goreportcard.com/badge/github.com/odpf/entropy)](https://goreportcard.com/report/github.com/odpf/entropy) -[![Version](https://img.shields.io/github/v/release/odpf/entropy?logo=semantic-release)](Version) +![test workflow](https://github.com/goto/entropy/actions/workflows/test.yml/badge.svg) +[![Go Report Card](https://goreportcard.com/badge/github.com/goto/entropy)](https://goreportcard.com/report/github.com/goto/entropy) +[![Version](https://img.shields.io/github/v/release/goto/entropy?logo=semantic-release)](Version) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg?logo=apache)](LICENSE) Entropy is an extensible infrastructure orchestration and application deployment tool. Entropy provides features @@ -23,7 +23,7 @@ Install Entropy on macOS, Windows, Linux, OpenBSD, FreeBSD, and on any machine. #### Binary (Cross-platform) -Download the appropriate version for your platform from [releases](https://github.com/odpf/entropy/releases) page. 
Once +Download the appropriate version for your platform from [releases](https://github.com/goto/entropy/releases) page. Once downloaded, the binary can be run from anywhere. You don’t need to install it into a global location. This works well for shared hosts and other systems where you don’t have a privileged account. Ideally, you should install it somewhere in your PATH for easy use. `/usr/local/bin` is the most probable location. @@ -32,7 +32,7 @@ in your PATH for easy use. `/usr/local/bin` is the most probable location. ```sh # Install entropy (requires homebrew installed) -$ brew install odpf/tap/entropy +$ brew install goto/tap/entropy # Check for installed entropy version $ entropy version @@ -58,7 +58,7 @@ $ entropy serve --config ./my_config.yaml ```sh # Clone the repo -$ git clone https://github.com/odpf/entropy.git +$ git clone https://github.com/goto/entropy.git # Build entropy binary file $ make build @@ -83,14 +83,14 @@ $ make test Development of Entropy happens in the open on GitHub, and we are grateful to the community for contributing bugfixes and improvements. Read below to learn how you can take part in improving Entropy. -Read our [contributing guide](https://odpf.github.io/entropy/docs/contribute/contributing) to learn about our +Read our [contributing guide](https://goto.github.io/entropy/docs/contribute/contributing) to learn about our development process, how to propose bugfixes and improvements, and how to build and test your changes to Entropy. To help you get your feet wet and get you familiar with our contribution process, we have a list -of [good first issues](https://github.com/odpf/entropy/labels/good%20first%20issue) that contain bugs which have a +of [good first issues](https://github.com/goto/entropy/labels/good%20first%20issue) that contain bugs which have a relatively limited scope. This is a great place to get started. -This project exists thanks to all the [contributors](https://github.com/odpf/entropy/graphs/contributors). +This project exists thanks to all the [contributors](https://github.com/goto/entropy/graphs/contributors). 
## License diff --git a/buf.gen.yaml b/buf.gen.yaml new file mode 100644 index 00000000..d6efdb7f --- /dev/null +++ b/buf.gen.yaml @@ -0,0 +1,26 @@ +version: v1 +plugins: + - plugin: buf.build/protocolbuffers/go:v1.30.0 + out: proto + opt: paths=source_relative + - plugin: buf.build/grpc/go:v1.3.0 + out: proto + opt: paths=source_relative,require_unimplemented_servers=true + - plugin: buf.build/bufbuild/validate-go:v1.0.1 + out: proto + opt: + - paths=source_relative + - plugin: buf.build/grpc-ecosystem/gateway:v2.15.2 + out: proto + opt: + - paths=source_relative + - allow_repeated_fields_in_body=true + - plugin: buf.build/grpc-ecosystem/openapiv2:v2.15.2 + out: proto + opt: + - allow_repeated_fields_in_body=true + - output_format=yaml + - allow_merge=true + - merge_file_name=entropy + - openapi_naming_strategy=simple + - json_names_for_fields=false \ No newline at end of file diff --git a/cli/action.go b/cli/action.go deleted file mode 100644 index 434f559a..00000000 --- a/cli/action.go +++ /dev/null @@ -1,78 +0,0 @@ -package cli - -import ( - "fmt" - - "github.com/MakeNowJust/heredoc" - "github.com/odpf/salt/printer" - "github.com/odpf/salt/term" - "github.com/spf13/cobra" - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" - "google.golang.org/protobuf/types/known/structpb" -) - -func cmdAction() *cobra.Command { - var urn, file, output string - var params structpb.Value - cmd := &cobra.Command{ - Use: "action ", - Aliases: []string{"action"}, - Short: "Manage actions", - Example: heredoc.Doc(` - $ entropy action start --urn= --file= --out=json - `), - Annotations: map[string]string{ - "group:core": "true", - }, - Args: cobra.ExactArgs(1), - RunE: handleErr(func(cmd *cobra.Command, args []string) error { - spinner := printer.Spin("") - defer spinner.Stop() - - var reqBody entropyv1beta1.ApplyActionRequest - if file != "" { - if err := parseFile(file, ¶ms); err != nil { - return err - } - reqBody.Params = ¶ms - } - - reqBody.Urn = urn - reqBody.Action = args[0] - - err := reqBody.ValidateAll() - if err != nil { - return err - } - - client, cancel, err := createClient(cmd) - if err != nil { - return err - } - defer cancel() - - res, err := client.ApplyAction(cmd.Context(), &reqBody) - if err != nil { - return err - } - spinner.Stop() - - fmt.Println(term.Greenf("Action applied successfully")) - if output == outputJSON || output == outputYAML || output == outputYML { - formattedString, err := formatOutput(res.GetResource(), output) - if err != nil { - return err - } - fmt.Println(term.Bluef(formattedString)) - } - - return nil - }), - } - - cmd.Flags().StringVarP(&urn, "urn", "u", "", "urn of the resource") - cmd.Flags().StringVarP(&file, "file", "f", "", "path to the params file") - cmd.Flags().StringVarP(&output, "out", "o", "", "output format, `-o json | yaml`") - - return cmd -} diff --git a/cli/cli.go b/cli/cli.go index 2894e01a..64a53801 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -3,7 +3,7 @@ package cli import ( "context" - "github.com/odpf/salt/cmdx" + "github.com/goto/salt/cmdx" "github.com/spf13/cobra" ) @@ -20,10 +20,10 @@ func Execute(ctx context.Context) { cmdServe(), cmdMigrate(), cmdVersion(), - cmdShowConfigs(), - cmdResource(), - cmdAction(), - cmdLogs(), + cmdConfig(), + cmdResourceCommand(), + cmdModuleCommand(), + cmdWorker(), ) cmdx.SetHelp(rootCmd) diff --git a/cli/client.go b/cli/client.go index 565971e1..6acfb9db 100644 --- a/cli/client.go +++ 
b/cli/client.go @@ -2,36 +2,107 @@ package cli import ( "context" - "strconv" "time" + "github.com/MakeNowJust/heredoc" "github.com/spf13/cobra" - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" ) -const timeout = 2 +const ( + flagOutFormat = "format" + flagEntropyHost = "entropy" + flagDialTimeout = "timeout" + + dialTimeout = 5 * time.Second +) -func createConnection(ctx context.Context, host string) (*grpc.ClientConn, error) { - opts := []grpc.DialOption{ - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), +func cmdResourceCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "resource", + Short: "Entropy client with resource management commands", + Example: heredoc.Doc(` + $ entropy resource create -f + $ entropy resource list + $ entropy resource view -u + $ entropy resource delete -u + $ entropy resource edit -u -f + $ entropy resource revisions -u + `), } - return grpc.DialContext(ctx, host, opts...) + cfg, _ := loadClientConfig() + + cmd.PersistentFlags().StringP(flagEntropyHost, "h", cfg.Host, "Entropy host to connect to") + cmd.PersistentFlags().DurationP(flagDialTimeout, "", dialTimeout, "Dial timeout") + cmd.PersistentFlags().StringP(flagOutFormat, "o", "pretty", "output format (json, yaml, pretty)") + + cmd.AddCommand( + cmdCreateResource(), + cmdViewResource(), + cmdEditResource(), + cmdStreamLogs(), + cmdApplyAction(), + cmdDeleteResource(), + cmdListRevisions(), + ) + + return cmd } -func createClient(cmd *cobra.Command) (entropyv1beta1.ResourceServiceClient, func(), error) { - c, err := loadConfig(cmd) +func cmdModuleCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "module", + Short: "Entropy client with module management commands", + Example: heredoc.Doc(` + $ entropy resource create -f + $ entropy resource update -u -f + $ entropy resource view -u + `), + } + + cfg, _ := loadClientConfig() + + cmd.PersistentFlags().StringP(flagEntropyHost, "h", cfg.Host, "Entropy host to connect to") + cmd.PersistentFlags().DurationP(flagDialTimeout, "", dialTimeout, "Dial timeout") + cmd.PersistentFlags().StringP(flagOutFormat, "o", "pretty", "output format (json, yaml, pretty)") + + cmd.AddCommand( + cmdModuleCreate(), + cmdModuleUpdate(), + cmdModuleView(), + ) + + return cmd +} + +func createResourceServiceClient(cmd *cobra.Command) (entropyv1beta1.ResourceServiceClient, func(), error) { + dialTimeoutVal, _ := cmd.Flags().GetDuration(flagDialTimeout) + entropyAddr, _ := cmd.Flags().GetString(flagEntropyHost) + + dialCtx, dialCancel := context.WithTimeout(cmd.Context(), dialTimeoutVal) + conn, err := grpc.DialContext(dialCtx, entropyAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { + dialCancel() return nil, nil, err } - host := c.Service.Host + ":" + strconv.Itoa(c.Service.Port) + cancel := func() { + dialCancel() + _ = conn.Close() + } + return entropyv1beta1.NewResourceServiceClient(conn), cancel, nil +} + +func createModuleServiceClient(cmd *cobra.Command) (entropyv1beta1.ModuleServiceClient, func(), error) { + dialTimeoutVal, _ := cmd.Flags().GetDuration(flagDialTimeout) + entropyAddr, _ := cmd.Flags().GetString(flagEntropyHost) - dialTimeoutCtx, dialCancel := context.WithTimeout(cmd.Context(), time.Second*timeout) - conn, err := createConnection(dialTimeoutCtx, host) + dialCtx, dialCancel := 
context.WithTimeout(cmd.Context(), dialTimeoutVal) + conn, err := grpc.DialContext(dialCtx, entropyAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { dialCancel() return nil, nil, err @@ -39,9 +110,7 @@ func createClient(cmd *cobra.Command) (entropyv1beta1.ResourceServiceClient, fun cancel := func() { dialCancel() - conn.Close() + _ = conn.Close() } - - client := entropyv1beta1.NewResourceServiceClient(conn) - return client, cancel, nil + return entropyv1beta1.NewModuleServiceClient(conn), cancel, nil } diff --git a/cli/config.go b/cli/config.go index 034db2b2..73b6c0c7 100644 --- a/cli/config.go +++ b/cli/config.go @@ -6,13 +6,16 @@ import ( "os" "time" - "github.com/odpf/salt/config" + "github.com/MakeNowJust/heredoc" + "github.com/goto/salt/cmdx" + "github.com/goto/salt/config" + "github.com/goto/salt/printer" "github.com/spf13/cobra" "gopkg.in/yaml.v2" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/logger" - "github.com/odpf/entropy/pkg/telemetry" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/logger" + "github.com/goto/entropy/pkg/telemetry" ) const configFlag = "config" @@ -20,38 +23,95 @@ const configFlag = "config" // Config contains the application configuration. type Config struct { Log logger.LogConfig `mapstructure:"log"` - Worker workerConf `mapstructure:"worker"` - Service serveConfig `mapstructure:"service"` + Syncer SyncerConf `mapstructure:"syncer"` + Service ServeConfig `mapstructure:"service"` PGConnStr string `mapstructure:"pg_conn_str" default:"postgres://postgres@localhost:5432/entropy?sslmode=disable"` Telemetry telemetry.Config `mapstructure:"telemetry"` } -type serveConfig struct { +type SyncerConf struct { + SyncInterval time.Duration `mapstructure:"sync_interval" default:"1s"` + RefreshInterval time.Duration `mapstructure:"refresh_interval" default:"3s"` + ExtendLockBy time.Duration `mapstructure:"extend_lock_by" default:"5s"` + SyncBackoffInterval time.Duration `mapstructure:"sync_backoff_interval" default:"5s"` + MaxRetries int `mapstructure:"max_retries" default:"5"` + Workers map[string]WorkerConfig `mapstructure:"workers" default:"[]"` +} + +type WorkerConfig struct { + Count int `mapstructure:"count" default:"1"` + Scope map[string][]string `mapstructure:"labels"` +} + +type ServeConfig struct { Host string `mapstructure:"host" default:""` Port int `mapstructure:"port" default:"8080"` -} -type workerConf struct { - QueueName string `mapstructure:"queue_name" default:"entropy_jobs"` - QueueSpec string `mapstructure:"queue_spec" default:"postgres://postgres@localhost:5432/entropy?sslmode=disable"` + HTTPAddr string `mapstructure:"http_addr" default:":8081"` + PaginationSizeDefault int32 `mapstructure:"pagination_size_default" default:"0"` + PaginationPageDefault int32 `mapstructure:"pagination_page_default" default:"1"` +} - Threads int `mapstructure:"threads" default:"1"` - PollInterval time.Duration `mapstructure:"poll_interval" default:"100ms"` +type clientConfig struct { + Host string `mapstructure:"host" default:"localhost:8080"` } -func (serveCfg serveConfig) addr() string { +func (serveCfg ServeConfig) httpAddr() string { return serveCfg.HTTPAddr } + +func (serveCfg ServeConfig) grpcAddr() string { return fmt.Sprintf("%s:%d", serveCfg.Host, serveCfg.Port) } +func cmdConfig() *cobra.Command { + cmd := 
&cobra.Command{ + Use: "config", + Short: "Manage configuration", + Example: heredoc.Doc(` + $ entropy config init + $ entropy config view + `), + } + + cmd.AddCommand(cmdInitConfig(), cmdShowConfigs()) + + return cmd +} + +func cmdInitConfig() *cobra.Command { + return &cobra.Command{ + Use: "init", + Short: "Initialize client configuration", + RunE: handleErr(func(cmd *cobra.Command, args []string) error { + cfg := cmdx.SetConfig("entropy") + + if err := cfg.Init(&clientConfig{}); err != nil { + return err + } + + fmt.Printf("config created: %v\n", cfg.File()) + return nil + }), + } +} + func cmdShowConfigs() *cobra.Command { return &cobra.Command{ - Use: "configs", + Use: "view", Short: "Display configurations currently loaded", RunE: handleErr(func(cmd *cobra.Command, args []string) error { + clientCfg := cmdx.SetConfig("entropy") + data, err := clientCfg.Read() + if err != nil { + fatalExitf("failed to read client configs: %v", err) + } + printer.Textln("Client config") + yaml.NewEncoder(os.Stdout).Encode(data) + cfg, err := loadConfig(cmd) if err != nil { fatalExitf("failed to read configs: %v", err) } + printer.Textln("Config") return yaml.NewEncoder(os.Stdout).Encode(cfg) }), } @@ -78,5 +138,17 @@ func loadConfig(cmd *cobra.Command) (Config, error) { return cfg, err } + PaginationSizeDefault = cfg.Service.PaginationSizeDefault + PaginationPageDefault = cfg.Service.PaginationPageDefault + return cfg, nil } + +func loadClientConfig() (*clientConfig, error) { + var config clientConfig + + cfg := cmdx.SetConfig("entropy") + err := cfg.Load(&config) + + return &config, err +} diff --git a/cli/display.go b/cli/display.go new file mode 100644 index 00000000..323b0ad4 --- /dev/null +++ b/cli/display.go @@ -0,0 +1,93 @@ +package cli + +import ( + "encoding/json" + "fmt" + "io" + "os" + "strings" + + "github.com/BurntSushi/toml" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/goto/entropy/pkg/errors" +) + +type FormatFn func(w io.Writer, v any) error + +// Display formats the given value 'v' using format specified as value for --format +// flag and writes to STDOUT. If --format=pretty/human, custom-formatter passed will +// be used. +func Display(cmd *cobra.Command, v any, prettyFormatter FormatFn) error { + format, _ := cmd.Flags().GetString("format") + format = strings.ToLower(strings.TrimSpace(format)) + + var formatter FormatFn + switch format { + case "json": + formatter = JSONFormat + + case "yaml", "yml": + formatter = YAMLFormat + + case "toml": + formatter = TOMLFormat + + case "pretty", "human": + if prettyFormatter != nil { + formatter = prettyFormatter + } else { + formatter = GoFormat + } + } + + if formatter == nil { + return errors.Errorf("--format value '%s' is not valid", format) + } + + return formatter(os.Stdout, v) +} + +// JSONFormat outputs 'v' formatted as indented JSON. +func JSONFormat(w io.Writer, v any) error { + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + return enc.Encode(v) +} + +// TOMLFormat outputs 'v' formatted as per TOML spec. +func TOMLFormat(w io.Writer, v any) error { + enc := toml.NewEncoder(w) + return enc.Encode(v) +} + +// YAMLFormat outputs 'v' formatted as per YAML spec. +func YAMLFormat(w io.Writer, v any) error { + // note: since most values are json tagged but may not be + // yaml tagged, we do this to ensure keys are snake-cased. 
+ val, err := jsonConvert(v) + if err != nil { + return err + } + return yaml.NewEncoder(w).Encode(val) +} + +// GoFormat outputs 'v' formatted using pp package. +func GoFormat(w io.Writer, v any) error { + _, err := fmt.Fprintln(w, v) + return err +} + +func jsonConvert(v any) (any, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + + var val any + if err := json.Unmarshal(b, &val); err != nil { + return nil, err + } + return val, nil +} diff --git a/cli/logs.go b/cli/logs.go deleted file mode 100644 index e2710985..00000000 --- a/cli/logs.go +++ /dev/null @@ -1,80 +0,0 @@ -package cli - -import ( - "errors" - "fmt" - "io" - "log" - "strings" - - "github.com/odpf/salt/term" // nolint - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" - - "github.com/MakeNowJust/heredoc" - "github.com/odpf/salt/printer" - "github.com/spf13/cobra" -) - -func cmdLogs() *cobra.Command { - var filter []string - filters := make(map[string]string) - cmd := &cobra.Command{ - Use: "logs ", - Aliases: []string{"logs"}, - Short: "Gets logs", - Example: heredoc.Doc(` - $ entropy logs --filter="key1=value1" --filter="key2=value2" - `), - Annotations: map[string]string{ - "group:core": "true", - }, - Args: cobra.ExactArgs(1), - RunE: handleErr(func(cmd *cobra.Command, args []string) error { - spinner := printer.Spin("") - defer spinner.Stop() - - client, cancel, err := createClient(cmd) - if err != nil { - return err - } - defer cancel() - - var reqBody entropyv1beta1.GetLogRequest - for _, f := range filter { - keyValue := strings.Split(f, "=") - filters[keyValue[0]] = keyValue[1] - } - reqBody.Filter = filters - reqBody.Urn = args[0] - - err = reqBody.ValidateAll() - if err != nil { - return err - } - - stream, err := client.GetLog(cmd.Context(), &reqBody) - if err != nil { - return err - } - spinner.Stop() - - for { - resp, err := stream.Recv() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return fmt.Errorf("failed to read stream: %w", err) - } - - log.SetFlags(0) - log.Printf(term.Bluef("%s", resp.GetChunk().GetData())) // nolint - } - - return nil - }), - } - - cmd.Flags().StringArrayVarP(&filter, "filter", "f", nil, "Use filters. 
Example: --filter=\"key=value\"")
-	return cmd
-}
diff --git a/cli/migrate.go b/cli/migrate.go
index 60b120e1..decbcebb 100644
--- a/cli/migrate.go
+++ b/cli/migrate.go
@@ -4,9 +4,8 @@ import (
 	"context"
 
 	"github.com/spf13/cobra"
-	"go.uber.org/zap"
 
-	"github.com/odpf/entropy/pkg/logger"
+	"github.com/goto/entropy/pkg/logger"
 )
 
 func cmdMigrate() *cobra.Command {
@@ -24,18 +23,18 @@ func cmdMigrate() *cobra.Command {
 			return err
 		}
 
-		zapLog, err := logger.New(&cfg.Log)
+		err = logger.Setup(&cfg.Log)
 		if err != nil {
 			return err
 		}
 
-		return runMigrations(cmd.Context(), zapLog, cfg)
+		return runMigrations(cmd.Context(), cfg)
 	})
 
 	return cmd
 }
 
-func runMigrations(ctx context.Context, zapLog *zap.Logger, cfg Config) error {
-	store := setupStorage(zapLog, cfg.PGConnStr)
+func runMigrations(ctx context.Context, cfg Config) error {
+	store := setupStorage(cfg.PGConnStr, cfg.Syncer, cfg.Service)
 	return store.Migrate(ctx)
 }
diff --git a/cli/module.go b/cli/module.go
new file mode 100644
index 00000000..4d18c242
--- /dev/null
+++ b/cli/module.go
@@ -0,0 +1,179 @@
+package cli
+
+import (
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/MakeNowJust/heredoc"
+	entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1"
+	"github.com/goto/salt/printer"
+	"github.com/spf13/cobra"
+)
+
+func cmdModuleCreate() *cobra.Command {
+	var filePath string
+	cmd := &cobra.Command{
+		Use:   "create",
+		Short: "Create a new module",
+		Example: heredoc.Doc(`
+			$ entropy module create -f module.yaml
+			$ entropy module create --file module.yaml
+		`),
+		Annotations: map[string]string{
+			"module": "core",
+		},
+	}
+
+	cmd.RunE = handleErr(func(cmd *cobra.Command, args []string) error {
+		var reqBody entropyv1beta1.Module
+
+		if err := parseFile(filePath, &reqBody); err != nil {
+			return err
+		}
+
+		if err := reqBody.ValidateAll(); err != nil {
+			return err
+		}
+
+		client, cancel, err := createModuleServiceClient(cmd)
+		if err != nil {
+			return err
+		}
+		defer cancel()
+
+		req := &entropyv1beta1.CreateModuleRequest{
+			Module: &reqBody,
+		}
+
+		spinner := printer.Spin("Creating module...")
+		defer spinner.Stop()
+		mod, err := client.CreateModule(cmd.Context(), req)
+		if err != nil {
+			return err
+		}
+		spinner.Stop()
+
+		module := mod.GetModule()
+		return Display(cmd, module, func(w io.Writer, v any) error {
+			_, _ = fmt.Fprintf(w, "Module created with URN '%s'.\n", module.Urn)
+			_, _ = fmt.Fprintln(w, "Use 'entropy module view <urn>' to view module.")
+			return nil
+		})
+	})
+
+	cmd.Flags().StringVarP(&filePath, "file", "f", "", "Path to the module body file")
+	cmd.MarkFlagRequired("file")
+
+	return cmd
+}
+
+func cmdModuleUpdate() *cobra.Command {
+	var filePath, urn string
+
+	cmd := &cobra.Command{
+		Use:   "update",
+		Short: "Update a module",
+		Example: heredoc.Doc(`
+			$ entropy module update -u orn:entropy:module:test-project:test-name -f module.yaml
+			$ entropy module update --urn orn:entropy:module:test-project:test-name --file module.yaml
+		`),
+		Annotations: map[string]string{
+			"module": "core",
+		},
+	}
+
+	cmd.RunE = handleErr(func(cmd *cobra.Command, args []string) error {
+		var reqBody entropyv1beta1.Module
+
+		if err := parseFile(filePath, &reqBody); err != nil {
+			return err
+		}
+
+		if err := reqBody.ValidateAll(); err != nil {
+			return err
+		}
+
+		client, cancel, err := createModuleServiceClient(cmd)
+		if err != nil {
+			return err
+		}
+		defer cancel()
+
+		spinner := printer.Spin("Updating module...")
+		defer 
spinner.Stop() + req := &entropyv1beta1.UpdateModuleRequest{ + Urn: urn, + Configs: reqBody.Configs, + } + spinner.Stop() + + mod, err := client.UpdateModule(cmd.Context(), req) + if err != nil { + return err + } + + module := mod.GetModule() + return Display(cmd, module, func(w io.Writer, v any) error { + _, _ = fmt.Fprintf(w, "Module updated with URN '%s'.\n", module.Urn) + _, _ = fmt.Fprintln(w, "Use 'entropy module view ' to view module.") + return nil + }) + }) + + cmd.Flags().StringVarP(&filePath, "file", "f", "", "Path to the module body file") + cmd.MarkFlagRequired("file") + cmd.Flags().StringVarP(&urn, "urn", "u", "", "URN of the module to update") + cmd.MarkFlagRequired("urn") + + return cmd +} + +func cmdModuleView() *cobra.Command { + var urn string + cmd := &cobra.Command{ + Use: "view", + Short: "View a module by URN", + Example: heredoc.Doc(` + $ entropy module view -u orn:entropy:module:test-project:test-name + $ entropy module view --urn orn:entropy:module:test-project:test-name + `), + Annotations: map[string]string{ + "module": "core", + }, + } + + cmd.RunE = handleErr(func(cmd *cobra.Command, args []string) error { + client, cancel, err := createModuleServiceClient(cmd) + if err != nil { + return err + } + defer cancel() + + spinner := printer.Spin("Fetching module...") + defer spinner.Stop() + req := &entropyv1beta1.GetModuleRequest{ + Urn: urn, + } + spinner.Stop() + + mod, err := client.GetModule(cmd.Context(), req) + if err != nil { + return err + } + + module := mod.GetModule() + return Display(cmd, module, func(w io.Writer, v any) error { + printer.Table(os.Stdout, [][]string{ + {"URN", "NAME", "PROJECT", "Config"}, + {module.Urn, module.Name, module.Project, module.Configs.GetStringValue()}, + }) + return nil + }) + }) + + cmd.Flags().StringVarP(&urn, "urn", "u", "", "URN of the module to view") + cmd.MarkFlagRequired("urn") + + return cmd +} diff --git a/cli/resource.go b/cli/resource.go index 8669201b..47130f68 100644 --- a/cli/resource.go +++ b/cli/resource.go @@ -2,363 +2,386 @@ package cli import ( "fmt" + "io" "os" + "strings" - "github.com/odpf/salt/term" // nolint - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" - - "github.com/MakeNowJust/heredoc" - "github.com/odpf/salt/printer" + "github.com/goto/salt/printer" "github.com/spf13/cobra" -) - -func cmdResource() *cobra.Command { - cmd := &cobra.Command{ - Use: "resource", - Aliases: []string{"resources"}, - Short: "Manage resources", - Annotations: map[string]string{ - "group:core": "true", - }, - Example: heredoc.Doc(` - $ entropy resource create - $ entropy resource list - $ entropy resource view - $ entropy resource delete - $ entropy resource edit - $ entropy resource revisions - `), - } + "google.golang.org/protobuf/types/known/structpb" - cmd.AddCommand( - createResourceCommand(), - listAllResourcesCommand(), - viewResourceCommand(), - editResourceCommand(), - deleteResourceCommand(), - getRevisionsCommand(), - ) + "github.com/goto/entropy/pkg/errors" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" +) - return cmd -} +var PaginationSizeDefault, PaginationPageDefault int32 -func createResourceCommand() *cobra.Command { - var file, output string +func cmdViewResource() *cobra.Command { + var kind, project, urn string + var pageNum, pageSize int32 cmd := &cobra.Command{ - Use: "create", - Short: "create a resource", - Example: heredoc.Doc(` - $ entropy 
resource create --file= --out=json - `), - Annotations: map[string]string{ - "action:core": "true", - }, + Use: "get", + Short: "List or View existing resource(s)", + Aliases: []string{"view"}, RunE: handleErr(func(cmd *cobra.Command, args []string) error { - spinner := printer.Spin("") - defer spinner.Stop() - - var reqBody entropyv1beta1.CreateResourceRequest - if err := parseFile(file, &reqBody); err != nil { - return err - } else if err := reqBody.ValidateAll(); err != nil { - return err - } - - client, cancel, err := createClient(cmd) + client, cancel, err := createResourceServiceClient(cmd) if err != nil { return err } defer cancel() - res, err := client.CreateResource(cmd.Context(), &reqBody) - if err != nil { - return err - } - spinner.Stop() - - fmt.Println("URN: \t", term.Greenf(res.Resource.Urn)) - if output == outputJSON || output == outputYAML || output == outputYML { - formattedOutput, err := formatOutput(res.GetResource(), output) + if urn != "" { + // get resource + req := entropyv1beta1.GetResourceRequest{ + Urn: urn, + } + spinner := printer.Spin("Getting resource...") + defer spinner.Stop() + res, err := client.GetResource(cmd.Context(), &req) if err != nil { return err } - fmt.Println(term.Bluef(formattedOutput)) - } - - return nil - }), - } + spinner.Stop() - cmd.Flags().StringVarP(&file, "file", "f", "", "path to body of resource") - cmd.Flags().StringVarP(&output, "out", "o", "", "output format, `-o json | yaml`") - - return cmd -} - -func listAllResourcesCommand() *cobra.Command { - var output, kind, project string - cmd := &cobra.Command{ - Use: "list", - Short: "list all resources", - Example: heredoc.Doc(` - $ entropy resource list --kind= --project= --out=json - `), - Annotations: map[string]string{ - "action:core": "true", - }, - RunE: handleErr(func(cmd *cobra.Command, args []string) error { - spinner := printer.Spin("") - defer spinner.Stop() + r := res.GetResource() + return Display(cmd, r, func(w io.Writer, v any) error { + printer.Table(os.Stdout, [][]string{ + {"URN", "NAME", "KIND", "PROJECT", "STATUS"}, + {r.Urn, r.Name, r.Kind, r.Project, r.State.Status.String()}, + }) - var reqBody entropyv1beta1.ListResourcesRequest - reqBody.Kind = kind - reqBody.Project = project + return nil + }) + } - client, cancel, err := createClient(cmd) - if err != nil { - return err + // list resource + req := entropyv1beta1.ListResourcesRequest{ + Kind: kind, + Project: project, + PageNum: pageNum, + PageSize: pageSize, } - defer cancel() - res, err := client.ListResources(cmd.Context(), &reqBody) + spinner := printer.Spin("Listing resources...") + defer spinner.Stop() + res, err := client.ListResources(cmd.Context(), &req) if err != nil { return err } spinner.Stop() - if output == outputJSON || output == outputYAML || output == outputYML { - for _, resource := range res.GetResources() { - formattedOutput, err := formatOutput(resource, output) - if err != nil { - return err - } - fmt.Println(term.Bluef(formattedOutput)) - } - } else { + resources := res.GetResources() + return Display(cmd, resources, func(w io.Writer, _ any) error { var report [][]string report = append(report, []string{"URN", "NAME", "KIND", "PROJECT", "STATUS"}) - count := 0 - for _, r := range res.GetResources() { + for _, r := range resources { report = append(report, []string{r.Urn, r.Name, r.Kind, r.Project, r.State.Status.String()}) - count++ } + _, _ = fmt.Fprintf(w, "Total: %d\n", len(report)-1) printer.Table(os.Stdout, report) - fmt.Println("\nTotal: ", count) - - fmt.Println(term.Cyanf("To view 
all the data in JSON/YAML format, use flag `-o json | yaml`")) - } - return nil + return nil + }) }), } - cmd.Flags().StringVarP(&output, "out", "o", "", "output format, `-o json | yaml`") cmd.Flags().StringVarP(&kind, "kind", "k", "", "kind of resources") cmd.Flags().StringVarP(&project, "project", "p", "", "project of resources") + cmd.Flags().Int32Var(&pageNum, "page-num", PaginationPageDefault, "resources page number") + cmd.Flags().Int32Var(&pageSize, "page-size", PaginationSizeDefault, "resources page size") + cmd.Flags().StringVarP(&urn, "urn", "u", "", "URN of the module to view") return cmd } -func viewResourceCommand() *cobra.Command { - var output string +func cmdCreateResource() *cobra.Command { + var file string cmd := &cobra.Command{ - Use: "view ", - Short: "view a resource", - Example: heredoc.Doc(` - $ entropy resource view --out=json - `), - Annotations: map[string]string{ - "action:core": "true", - }, - Args: cobra.ExactArgs(1), + Use: "create", + Short: "Create a new resource on Entropy.", RunE: handleErr(func(cmd *cobra.Command, args []string) error { - spinner := printer.Spin("") - defer spinner.Stop() - - var reqBody entropyv1beta1.GetResourceRequest - reqBody.Urn = args[0] + var reqBody entropyv1beta1.Resource + if err := parseFile(file, &reqBody); err != nil { + return err + } - client, cancel, err := createClient(cmd) + client, cancel, err := createResourceServiceClient(cmd) if err != nil { return err } defer cancel() - res, err := client.GetResource(cmd.Context(), &reqBody) + req := &entropyv1beta1.CreateResourceRequest{ + Resource: &reqBody, + } + + spinner := printer.Spin("Creating resource...") + defer spinner.Stop() + res, err := client.CreateResource(cmd.Context(), req) if err != nil { return err } spinner.Stop() - if output == outputJSON || output == outputYAML || output == outputYML { - formattedOutput, err := formatOutput(res.GetResource(), output) - if err != nil { - return err - } - fmt.Println(term.Bluef(formattedOutput)) - } else { - r := res.GetResource() - - printer.Table(os.Stdout, [][]string{ - {"URN", "NAME", "KIND", "PROJECT", "STATUS"}, - {r.Urn, r.Name, r.Kind, r.Project, r.State.Status.String()}, - }) - - fmt.Println(term.Cyanf("\nTo view all the data in JSON/YAML format, use flag `-o json | yaml`")) - } - return nil + resource := res.GetResource() + return Display(cmd, resource, func(w io.Writer, v any) error { + _, _ = fmt.Fprintf(w, "Resource created with URN '%s'.\n", resource.Urn) + _, _ = fmt.Fprintln(w, "Use 'entropy resource get ' to view resource.") + return nil + }) }), } - cmd.Flags().StringVarP(&output, "out", "o", "", "output format, `-o json | yaml`") + cmd.Flags().StringVarP(&file, "file", "f", "", "path to the updated spec of resource") + cmd.MarkFlagRequired("file") return cmd } -func editResourceCommand() *cobra.Command { - var file string +func cmdEditResource() *cobra.Command { + var file, urn string cmd := &cobra.Command{ - Use: "edit ", - Short: "edit a resource", - Example: heredoc.Doc(` - $ entropy resource edit --file= - `), - Annotations: map[string]string{ - "action:core": "true", - }, - Args: cobra.ExactArgs(1), + Use: "edit", + Short: "Make updates to an existing resource", RunE: handleErr(func(cmd *cobra.Command, args []string) error { - spinner := printer.Spin("") - defer spinner.Stop() - var newSpec entropyv1beta1.ResourceSpec if err := parseFile(file, &newSpec); err != nil { return err - } else if err := newSpec.ValidateAll(); err != nil { - return err } - var reqBody entropyv1beta1.UpdateResourceRequest - 
reqBody.NewSpec = &newSpec - reqBody.Urn = args[0] + reqBody := entropyv1beta1.UpdateResourceRequest{ + Urn: urn, + NewSpec: &newSpec, + } if err := reqBody.ValidateAll(); err != nil { return err } - client, cancel, err := createClient(cmd) + client, cancel, err := createResourceServiceClient(cmd) if err != nil { return err } defer cancel() - _, err = client.UpdateResource(cmd.Context(), &reqBody) + spinner := printer.Spin("Updating resource...") + defer spinner.Stop() + resp, err := client.UpdateResource(cmd.Context(), &reqBody) if err != nil { return err } spinner.Stop() - fmt.Println(term.Greenf("Successfully updated")) - return nil + resource := resp.GetResource() + return Display(cmd, resource, func(w io.Writer, _ any) error { + _, _ = fmt.Fprintln(w, "Update request placed successfully.") + _, _ = fmt.Fprintln(w, "Use 'entropy resource get ' to view status.") + return nil + }) }), } cmd.Flags().StringVarP(&file, "file", "f", "", "path to the updated spec of resource") - + cmd.MarkFlagRequired("file") + cmd.Flags().StringVarP(&urn, "urn", "u", "", "URN of the resource to update") + cmd.MarkFlagRequired("urn") return cmd } -func deleteResourceCommand() *cobra.Command { +func cmdApplyAction() *cobra.Command { + var urn, file, actionName string cmd := &cobra.Command{ - Use: "delete ", - Short: "delete a resource", - Example: heredoc.Doc(` - $ entropy resource delete - `), - Annotations: map[string]string{ - "action:core": "true", - }, - Args: cobra.ExactArgs(1), + Use: "action", + Short: "Apply an action on an existing resource", + Aliases: []string{"execute"}, RunE: handleErr(func(cmd *cobra.Command, args []string) error { - spinner := printer.Spin("") - defer spinner.Stop() + var params structpb.Value + if file != "" { + if err := parseFile(file, ¶ms); err != nil { + return err + } + } + + reqBody := entropyv1beta1.ApplyActionRequest{ + Urn: urn, + Action: actionName, + Params: ¶ms, + } - var reqBody entropyv1beta1.DeleteResourceRequest - reqBody.Urn = args[0] + err := reqBody.ValidateAll() + if err != nil { + return err + } - client, cancel, err := createClient(cmd) + client, cancel, err := createResourceServiceClient(cmd) if err != nil { return err } defer cancel() - _, err = client.DeleteResource(cmd.Context(), &reqBody) + spinner := printer.Spin("Applying action...") + defer spinner.Stop() + res, err := client.ApplyAction(cmd.Context(), &reqBody) if err != nil { return err } spinner.Stop() - fmt.Println(term.Greenf("Successfully deleted")) - return nil + resource := res.GetResource() + return Display(cmd, resource, func(w io.Writer, v any) error { + _, _ = fmt.Fprintln(w, "Action request placed successfully.") + _, _ = fmt.Fprintln(w, "Use 'entropy resource get ' to view status.") + return nil + }) }), } + + cmd.Flags().StringVarP(&urn, "urn", "u", "", "urn of the resource") + cmd.Flags().StringVarP(&file, "file", "f", "", "path to the params file") + cmd.Flags().StringVarP(&actionName, "action", "a", "", "action to apply") + cmd.MarkFlagRequired("action") + return cmd } -func getRevisionsCommand() *cobra.Command { - var output string +func cmdDeleteResource() *cobra.Command { + var urn string cmd := &cobra.Command{ - Use: "revisions", - Short: "get revisions of a resource", - Example: heredoc.Doc(` - $ entropy resource revisions --out=json - `), - Annotations: map[string]string{ - "action:core": "true", - }, + Use: "delete", + Short: "Delete an existing resource.", + Aliases: []string{"rm", "del"}, RunE: handleErr(func(cmd *cobra.Command, args []string) error { - spinner := 
printer.Spin("") + client, cancel, err := createResourceServiceClient(cmd) + if err != nil { + return err + } + defer cancel() + + spinner := printer.Spin("Deleting resource...") defer spinner.Stop() + _, err = client.DeleteResource(cmd.Context(), &entropyv1beta1.DeleteResourceRequest{Urn: urn}) + if err != nil { + return err + } + spinner.Stop() + + return Display(cmd, nil, func(w io.Writer, v any) error { + _, _ = fmt.Fprintln(w, "Delete request placed successfully") + return nil + }) + }), + } + cmd.Flags().StringVarP(&urn, "urn", "u", "", "URN of the resource to delete") + cmd.MarkFlagRequired("urn") + + return cmd +} + +func cmdListRevisions() *cobra.Command { + var urn string + cmd := &cobra.Command{ + Use: "revisions", + Short: "List revisions of a resource.", + Aliases: []string{"revs"}, + RunE: handleErr(func(cmd *cobra.Command, args []string) error { var reqBody entropyv1beta1.GetResourceRevisionsRequest - reqBody.Urn = args[0] + reqBody.Urn = urn - client, cancel, err := createClient(cmd) + client, cancel, err := createResourceServiceClient(cmd) if err != nil { return err } defer cancel() - res, err := client.GetResourceRevisions(cmd.Context(), &reqBody) + req := &entropyv1beta1.GetResourceRevisionsRequest{Urn: urn} + + spinner := printer.Spin("Retrieving resource revisions...") + defer spinner.Stop() + res, err := client.GetResourceRevisions(cmd.Context(), req) if err != nil { return err } spinner.Stop() - if output == outputJSON || output == outputYAML || output == outputYML { - for _, rev := range res.GetRevisions() { - formattedOutput, err := formatOutput(rev, output) - if err != nil { - return err - } - fmt.Println(term.Bluef(formattedOutput)) - } - } else { + revisions := res.GetRevisions() + return Display(cmd, revisions, func(w io.Writer, v any) error { var report [][]string report = append(report, []string{"ID", "URN", "CREATED AT"}) - count := 0 for _, rev := range res.GetRevisions() { report = append(report, []string{rev.GetId(), rev.GetUrn(), rev.GetCreatedAt().AsTime().String()}) - count++ } printer.Table(os.Stdout, report) - fmt.Println("\nTotal: ", count) + _, _ = fmt.Fprintf(w, "Total: %d\n", len(report)-1) + return nil + }) + }), + } + + cmd.Flags().StringVarP(&urn, "urn", "u", "", "URN of the resource to view revisions") + cmd.MarkFlagRequired("urn") + + return cmd +} + +func cmdStreamLogs() *cobra.Command { + var urn string + var filter []string + cmd := &cobra.Command{ + Use: "logs", + Short: "Stream real-time logs for an existing resource.", + Aliases: []string{"logs"}, + RunE: handleErr(func(cmd *cobra.Command, args []string) error { + client, cancel, err := createResourceServiceClient(cmd) + if err != nil { + return err + } + defer cancel() + + filters := map[string]string{} + for _, f := range filter { + keyValue := strings.Split(f, ":") + filters[keyValue[0]] = keyValue[1] + } - fmt.Println(term.Cyanf("To view all the data in JSON/YAML format, use flag `-o json | yaml`")) + reqBody := &entropyv1beta1.GetLogRequest{ + Urn: urn, + Filter: filters, } + + if err := reqBody.ValidateAll(); err != nil { + return err + } + + spinner := printer.Spin("Preparing to stream logs...") + defer spinner.Stop() + stream, err := client.GetLog(cmd.Context(), reqBody) + if err != nil { + return err + } + spinner.Stop() + + for { + resp, err := stream.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return fmt.Errorf("failed to read stream: %w", err) + } + + chunk := resp.GetChunk() + _ = Display(cmd, chunk, func(w io.Writer, v any) error { + _, _ = 
fmt.Fprintln(w, string(chunk.GetData())) + return nil + }) + } + return nil }), } - cmd.Flags().StringVarP(&output, "out", "o", "", "output format, `-o json | yaml`") + cmd.Flags().StringSliceVarP(&filter, "filter", "f", nil, "Filter. (e.g., --filter=\"key:value\")") + cmd.Flags().StringVarP(&urn, "urn", "u", "", "URN of the resource to stream logs") + cmd.MarkFlagRequired("urn") return cmd } diff --git a/cli/serve.go b/cli/serve.go index aed4529f..51963ddc 100644 --- a/cli/serve.go +++ b/cli/serve.go @@ -7,18 +7,21 @@ import ( "github.com/newrelic/go-agent/v3/newrelic" "github.com/spf13/cobra" "go.uber.org/zap" - - "github.com/odpf/entropy/core" - "github.com/odpf/entropy/core/module" - entropyserver "github.com/odpf/entropy/internal/server" - "github.com/odpf/entropy/internal/store/postgres" - "github.com/odpf/entropy/modules" - "github.com/odpf/entropy/modules/firehose" - "github.com/odpf/entropy/modules/kubernetes" - "github.com/odpf/entropy/pkg/logger" - "github.com/odpf/entropy/pkg/telemetry" - "github.com/odpf/entropy/pkg/worker" - "github.com/odpf/entropy/pkg/worker/pgq" + "golang.org/x/sync/errgroup" + + "github.com/goto/entropy/core" + "github.com/goto/entropy/core/module" + entropyserver "github.com/goto/entropy/internal/server" + "github.com/goto/entropy/internal/store/postgres" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/dagger" + "github.com/goto/entropy/modules/firehose" + "github.com/goto/entropy/modules/flink" + "github.com/goto/entropy/modules/job" + "github.com/goto/entropy/modules/kafka" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/logger" + "github.com/goto/entropy/pkg/telemetry" ) func cmdServe() *cobra.Command { @@ -41,67 +44,64 @@ func cmdServe() *cobra.Command { return err } - zapLog, err := logger.New(&cfg.Log) - if err != nil { - return err - } - - telemetry.Init(cmd.Context(), cfg.Telemetry, zapLog) - nrApp, err := newrelic.NewApplication( - newrelic.ConfigAppName(cfg.Telemetry.ServiceName), - newrelic.ConfigLicense(cfg.Telemetry.NewRelicAPIKey), - ) - - if migrate { - if migrateErr := runMigrations(cmd.Context(), zapLog, cfg); migrateErr != nil { - return migrateErr - } - } - - asyncWorker := setupWorker(zapLog, cfg.Worker) - if spawnWorker { - go func() { - if runErr := asyncWorker.Run(cmd.Context()); runErr != nil { - zapLog.Error("worker exited with error", zap.Error(err)) - } - }() - } - - return runServer(cmd.Context(), nrApp, zapLog, cfg, asyncWorker) + return StartServer(cmd.Context(), cfg, migrate, spawnWorker) }) return cmd } -func runServer(baseCtx context.Context, nrApp *newrelic.Application, zapLog *zap.Logger, cfg Config, asyncWorker *worker.Worker) error { - ctx, cancel := context.WithCancel(baseCtx) - defer cancel() +func StartServer(ctx context.Context, cfg Config, migrate, spawnWorker bool) error { + err := logger.Setup(&cfg.Log) + if err != nil { + return err + } - store := setupStorage(zapLog, cfg.PGConnStr) - moduleService := module.NewService(setupRegistry(zapLog), store) - resourceService := core.New(store, moduleService, asyncWorker, time.Now, zapLog) + telemetry.Init(ctx, cfg.Telemetry) + nrApp, err := 
newrelic.NewApplication( + newrelic.ConfigAppName(cfg.Telemetry.ServiceName), + newrelic.ConfigLicense(cfg.Telemetry.NewRelicAPIKey), + ) - if err := asyncWorker.Register(core.JobKindSyncResource, resourceService.HandleSyncJob); err != nil { - return err + store := setupStorage(cfg.PGConnStr, cfg.Syncer, cfg.Service) + moduleService := module.NewService(setupRegistry(), store) + resourceService := core.New(store, moduleService, time.Now, cfg.Syncer.SyncBackoffInterval, cfg.Syncer.MaxRetries, cfg.Telemetry.ServiceName) + + if migrate { + if migrateErr := runMigrations(ctx, cfg); migrateErr != nil { + return migrateErr + } } - if err := asyncWorker.Register(core.JobKindScheduledSyncResource, resourceService.HandleSyncJob); err != nil { - return err + if spawnWorker { + eg := &errgroup.Group{} + spawnWorkers(ctx, resourceService, cfg.Syncer.Workers, cfg.Syncer.SyncInterval, eg) + go func() { + if err := eg.Wait(); err != nil { + zap.L().Error("syncer exited with error", zap.Error(err)) + } + }() } - return entropyserver.Serve(ctx, cfg.Service.addr(), nrApp, zapLog, resourceService, moduleService) + return entropyserver.Serve(ctx, + cfg.Service.httpAddr(), cfg.Service.grpcAddr(), + nrApp, resourceService, moduleService, + ) } -func setupRegistry(logger *zap.Logger) module.Registry { +func setupRegistry() module.Registry { supported := []module.Descriptor{ kubernetes.Module, firehose.Module, + job.Module, + kafka.Module, + flink.Module, + dagger.Module, } registry := &modules.Registry{} for _, desc := range supported { if err := registry.Register(desc); err != nil { - logger.Fatal("failed to register module", + zap.L().Fatal("failed to register module", zap.String("module_kind", desc.Kind), zap.Error(err), ) @@ -110,29 +110,10 @@ func setupRegistry(logger *zap.Logger) module.Registry { return registry } -func setupWorker(logger *zap.Logger, conf workerConf) *worker.Worker { - pgQueue, err := pgq.Open(conf.QueueSpec, conf.QueueName) - if err != nil { - logger.Fatal("failed to init postgres job-queue", zap.Error(err)) - } - - opts := []worker.Option{ - worker.WithLogger(logger.Named("worker")), - worker.WithRunConfig(conf.Threads, conf.PollInterval), - } - - asyncWorker, err := worker.New(pgQueue, opts...) 
- if err != nil { - logger.Fatal("failed to init worker instance", zap.Error(err)) - } - - return asyncWorker -} - -func setupStorage(logger *zap.Logger, pgConStr string) *postgres.Store { - store, err := postgres.Open(pgConStr) +func setupStorage(pgConStr string, syncCfg SyncerConf, serveCfg ServeConfig) *postgres.Store { + store, err := postgres.Open(pgConStr, syncCfg.RefreshInterval, syncCfg.ExtendLockBy, serveCfg.PaginationSizeDefault, serveCfg.PaginationPageDefault) if err != nil { - logger.Fatal("failed to connect to Postgres database", + zap.L().Fatal("failed to connect to Postgres database", zap.Error(err), zap.String("conn_str", pgConStr)) } return store diff --git a/cli/utils.go b/cli/utils.go index 67025e10..29ecbd55 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -1,23 +1,17 @@ package cli import ( - "errors" "fmt" "os" "path/filepath" "github.com/ghodss/yaml" + "github.com/goto/entropy/pkg/errors" "github.com/spf13/cobra" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/reflect/protoreflect" ) -const ( - outputJSON = "json" - outputYAML = "yaml" - outputYML = "yml" -) - type RunEFunc func(cmd *cobra.Command, args []string) error func parseFile(filePath string, v protoreflect.ProtoMessage) error { @@ -44,39 +38,12 @@ func parseFile(filePath string, v protoreflect.ProtoMessage) error { } default: - return errors.New("unsupported file type") // nolint + return errors.New("unsupported file type") } return nil } -func formatOutput(i protoreflect.ProtoMessage, format string) (string, error) { - marshalOpts := protojson.MarshalOptions{ - Indent: "\t", - Multiline: true, - UseProtoNames: true, - } - - b, e := marshalOpts.Marshal(i) - if e != nil { - return "", e - } - - switch format { - case outputJSON: - return string(b), nil - - case outputYAML, outputYML: - y, e := yaml.JSONToYAML(b) - if e != nil { - return "", e - } - return string(y), nil - default: - return "", errors.New("unsupported format") // nolint - } -} - func fatalExitf(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) 
os.Exit(1) diff --git a/cli/version.go b/cli/version.go index 2fcbd04b..61922444 100644 --- a/cli/version.go +++ b/cli/version.go @@ -3,7 +3,7 @@ package cli import ( "github.com/spf13/cobra" - v "github.com/odpf/entropy/pkg/version" + v "github.com/goto/entropy/pkg/version" ) func cmdVersion() *cobra.Command { diff --git a/cli/worker.go b/cli/worker.go new file mode 100644 index 00000000..16016685 --- /dev/null +++ b/cli/worker.go @@ -0,0 +1,80 @@ +package cli + +import ( + "context" + "time" + + "github.com/MakeNowJust/heredoc" + "github.com/goto/entropy/core" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/pkg/logger" + "github.com/goto/entropy/pkg/telemetry" + "github.com/newrelic/go-agent/v3/newrelic" + "github.com/spf13/cobra" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +func cmdWorker() *cobra.Command { + cmd := &cobra.Command{ + Use: "worker", + Short: "Start workers", + Example: heredoc.Doc(` + $ entropy worker + `), + Annotations: map[string]string{ + "group:other": "server", + }, + } + + cmd.RunE = handleErr(func(cmd *cobra.Command, args []string) error { + cfg, err := loadConfig(cmd) + if err != nil { + return err + } + + err = logger.Setup(&cfg.Log) + if err != nil { + return err + } + + ctx := cmd.Context() + + telemetry.Init(ctx, cfg.Telemetry) + _, err = newrelic.NewApplication( + newrelic.ConfigAppName(cfg.Telemetry.ServiceName), + newrelic.ConfigLicense(cfg.Telemetry.NewRelicAPIKey), + ) + if err != nil { + zap.L().Error("error initializing opentelemetry", zap.Error(err)) + } + + return StartWorkers(ctx, cfg) + }) + + return cmd +} + +func StartWorkers(ctx context.Context, cfg Config) error { + store := setupStorage(cfg.PGConnStr, cfg.Syncer, cfg.Service) + moduleService := module.NewService(setupRegistry(), store) + resourceService := core.New(store, moduleService, time.Now, cfg.Syncer.SyncBackoffInterval, cfg.Syncer.MaxRetries, cfg.Telemetry.ServiceName) + + eg := &errgroup.Group{} + spawnWorkers(ctx, resourceService, cfg.Syncer.Workers, cfg.Syncer.SyncInterval, eg) + if err := eg.Wait(); err != nil { + return err + } + + return nil +} + +func spawnWorkers(ctx context.Context, resourceService *core.Service, workerModules map[string]WorkerConfig, syncInterval time.Duration, eg *errgroup.Group) { + if len(workerModules) == 0 { + resourceService.RunSyncer(ctx, 1, syncInterval, map[string][]string{}, eg) + } else { + for _, module := range workerModules { + resourceService.RunSyncer(ctx, module.Count, syncInterval, module.Scope, eg) + } + } +} diff --git a/core/core.go b/core/core.go index a4bdf02f..2f522eb3 100644 --- a/core/core.go +++ b/core/core.go @@ -1,62 +1,56 @@ package core -//go:generate mockery --name=AsyncWorker -r --case underscore --with-expecter --structname AsyncWorker --filename=async_worker.go --output=./mocks //go:generate mockery --name=ModuleService -r --case underscore --with-expecter --structname ModuleService --filename=module_service.go --output=./mocks import ( - context "context" + "context" "encoding/json" "time" - "go.uber.org/zap" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/worker" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + 
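The new `cli/worker.go` fans syncers out per configured module: with no `worker` entries a single unscoped syncer runs, otherwise each entry gets `Count` syncers restricted to its `Scope`. A self-contained sketch of that fan-out; `WorkerConfig` mirrors the fields the diff references, and `runSyncer` is only a placeholder for `core.Service.RunSyncer`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// WorkerConfig mirrors the fields spawnWorkers reads: how many syncers to
// run and which resources they are scoped to (e.g. {"kind": ["firehose"]}).
type WorkerConfig struct {
	Count int
	Scope map[string][]string
}

// runSyncer stands in for core.Service.RunSyncer: each goroutine does one
// sync pass per tick until the context is cancelled.
func runSyncer(ctx context.Context, count int, interval time.Duration, scope map[string][]string, eg *errgroup.Group) {
	for i := 0; i < count; i++ {
		eg.Go(func() error {
			t := time.NewTicker(interval)
			defer t.Stop()
			for {
				select {
				case <-ctx.Done():
					return nil
				case <-t.C:
					fmt.Printf("sync pass, scope=%v\n", scope)
				}
			}
		})
	}
}

func spawnWorkers(ctx context.Context, workers map[string]WorkerConfig, interval time.Duration, eg *errgroup.Group) {
	if len(workers) == 0 {
		// No per-module config: one unscoped syncer handles everything.
		runSyncer(ctx, 1, interval, map[string][]string{}, eg)
		return
	}
	for _, w := range workers {
		runSyncer(ctx, w.Count, interval, w.Scope, eg)
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	eg := &errgroup.Group{}
	spawnWorkers(ctx, map[string]WorkerConfig{
		"firehose": {Count: 2, Scope: map[string][]string{"kind": {"firehose"}}},
	}, 300*time.Millisecond, eg)
	_ = eg.Wait()
}
```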
"github.com/goto/entropy/pkg/errors" ) type Service struct { - logger *zap.Logger - clock func() time.Time - store resource.Store - worker AsyncWorker - moduleSvc ModuleService + clock func() time.Time + store resource.Store + moduleSvc ModuleService + syncBackoff time.Duration + maxSyncRetries int + serviceName string } type ModuleService interface { - PlanAction(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*module.Plan, error) + PlanAction(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) SyncState(ctx context.Context, res module.ExpandedResource) (*resource.State, error) StreamLogs(ctx context.Context, res module.ExpandedResource, filter map[string]string) (<-chan module.LogChunk, error) GetOutput(ctx context.Context, res module.ExpandedResource) (json.RawMessage, error) } -type AsyncWorker interface { - Enqueue(ctx context.Context, jobs ...worker.Job) error -} - -func New(repo resource.Store, moduleSvc ModuleService, asyncWorker AsyncWorker, clockFn func() time.Time, lg *zap.Logger) *Service { +func New(repo resource.Store, moduleSvc ModuleService, clockFn func() time.Time, syncBackoffInterval time.Duration, maxRetries int, serviceName string) *Service { if clockFn == nil { clockFn = time.Now } return &Service{ - logger: lg, - clock: clockFn, - store: repo, - worker: asyncWorker, - moduleSvc: moduleSvc, + clock: clockFn, + store: repo, + syncBackoff: syncBackoffInterval, + maxSyncRetries: maxRetries, + moduleSvc: moduleSvc, + serviceName: serviceName, } } -func (s *Service) generateModuleSpec(ctx context.Context, res resource.Resource) (*module.ExpandedResource, error) { +func (svc *Service) generateModuleSpec(ctx context.Context, res resource.Resource) (*module.ExpandedResource, error) { modSpec := module.ExpandedResource{ Resource: res, Dependencies: map[string]module.ResolvedDependency{}, } for key, resURN := range res.Spec.Dependencies { - d, err := s.GetResource(ctx, resURN) + d, err := svc.GetResource(ctx, resURN) if err != nil { if errors.Is(err, errors.ErrNotFound) { return nil, errors.ErrInvalid. diff --git a/core/core_test.go b/core/core_test.go index 825504e9..315f3ec0 100644 --- a/core/core_test.go +++ b/core/core_test.go @@ -6,9 +6,9 @@ import ( "github.com/stretchr/testify/assert" - "github.com/odpf/entropy/core" - "github.com/odpf/entropy/core/mocks" - "github.com/odpf/entropy/core/resource" + "github.com/goto/entropy/core" + "github.com/goto/entropy/core/mocks" + "github.com/goto/entropy/core/resource" ) var ( @@ -25,6 +25,6 @@ var ( func TestNew(t *testing.T) { t.Parallel() - s := core.New(&mocks.ResourceStore{}, &mocks.ModuleService{}, &mocks.AsyncWorker{}, deadClock, nil) + s := core.New(&mocks.ResourceStore{}, &mocks.ModuleService{}, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) assert.NotNil(t, s) } diff --git a/core/mocks/async_worker.go b/core/mocks/async_worker.go index d5c53398..ef62e849 100644 --- a/core/mocks/async_worker.go +++ b/core/mocks/async_worker.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.23.1. DO NOT EDIT. 
package mocks @@ -7,7 +7,7 @@ import ( mock "github.com/stretchr/testify/mock" - worker "github.com/odpf/entropy/pkg/worker" + worker "github.com/goto/entropy/pkg/worker" ) // AsyncWorker is an autogenerated mock type for the AsyncWorker type @@ -50,8 +50,8 @@ type AsyncWorker_Enqueue_Call struct { } // Enqueue is a helper method to define mock.On call -// - ctx context.Context -// - jobs ...worker.Job +// - ctx context.Context +// - jobs ...worker.Job func (_e *AsyncWorker_Expecter) Enqueue(ctx interface{}, jobs ...interface{}) *AsyncWorker_Enqueue_Call { return &AsyncWorker_Enqueue_Call{Call: _e.mock.On("Enqueue", append([]interface{}{ctx}, jobs...)...)} @@ -74,3 +74,23 @@ func (_c *AsyncWorker_Enqueue_Call) Return(_a0 error) *AsyncWorker_Enqueue_Call _c.Call.Return(_a0) return _c } + +func (_c *AsyncWorker_Enqueue_Call) RunAndReturn(run func(context.Context, ...worker.Job) error) *AsyncWorker_Enqueue_Call { + _c.Call.Return(run) + return _c +} + +type mockConstructorTestingTNewAsyncWorker interface { + mock.TestingT + Cleanup(func()) +} + +// NewAsyncWorker creates a new instance of AsyncWorker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewAsyncWorker(t mockConstructorTestingTNewAsyncWorker) *AsyncWorker { + mock := &AsyncWorker{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/mocks/driver.go b/core/mocks/driver.go index 243bc872..b7ea8c33 100644 --- a/core/mocks/driver.go +++ b/core/mocks/driver.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks @@ -8,9 +8,9 @@ import ( mock "github.com/stretchr/testify/mock" - module "github.com/odpf/entropy/core/module" + module "github.com/goto/entropy/core/module" - resource "github.com/odpf/entropy/core/resource" + resource "github.com/goto/entropy/core/resource" ) // ModuleDriver is an autogenerated mock type for the Driver type @@ -30,7 +30,15 @@ func (_m *ModuleDriver) EXPECT() *ModuleDriver_Expecter { func (_m *ModuleDriver) Output(ctx context.Context, res module.ExpandedResource) (json.RawMessage, error) { ret := _m.Called(ctx, res) + if len(ret) == 0 { + panic("no return value specified for Output") + } + var r0 json.RawMessage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) (json.RawMessage, error)); ok { + return rf(ctx, res) + } if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) json.RawMessage); ok { r0 = rf(ctx, res) } else { @@ -39,7 +47,6 @@ func (_m *ModuleDriver) Output(ctx context.Context, res module.ExpandedResource) } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource) error); ok { r1 = rf(ctx, res) } else { @@ -55,8 +62,8 @@ type ModuleDriver_Output_Call struct { } // Output is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource +// - ctx context.Context +// - res module.ExpandedResource func (_e *ModuleDriver_Expecter) Output(ctx interface{}, res interface{}) *ModuleDriver_Output_Call { return &ModuleDriver_Output_Call{Call: _e.mock.On("Output", ctx, res)} } @@ -73,20 +80,32 @@ func (_c *ModuleDriver_Output_Call) Return(_a0 json.RawMessage, _a1 error) *Modu return _c } +func (_c *ModuleDriver_Output_Call) RunAndReturn(run func(context.Context, 
module.ExpandedResource) (json.RawMessage, error)) *ModuleDriver_Output_Call { + _c.Call.Return(run) + return _c +} + // Plan provides a mock function with given fields: ctx, res, act -func (_m *ModuleDriver) Plan(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*module.Plan, error) { +func (_m *ModuleDriver) Plan(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { ret := _m.Called(ctx, res, act) - var r0 *module.Plan - if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, module.ActionRequest) *module.Plan); ok { + if len(ret) == 0 { + panic("no return value specified for Plan") + } + + var r0 *resource.Resource + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, module.ActionRequest) (*resource.Resource, error)); ok { + return rf(ctx, res, act) + } + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, module.ActionRequest) *resource.Resource); ok { r0 = rf(ctx, res, act) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*module.Plan) + r0 = ret.Get(0).(*resource.Resource) } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource, module.ActionRequest) error); ok { r1 = rf(ctx, res, act) } else { @@ -102,9 +121,9 @@ type ModuleDriver_Plan_Call struct { } // Plan is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource -// - act module.ActionRequest +// - ctx context.Context +// - res module.ExpandedResource +// - act module.ActionRequest func (_e *ModuleDriver_Expecter) Plan(ctx interface{}, res interface{}, act interface{}) *ModuleDriver_Plan_Call { return &ModuleDriver_Plan_Call{Call: _e.mock.On("Plan", ctx, res, act)} } @@ -116,16 +135,29 @@ func (_c *ModuleDriver_Plan_Call) Run(run func(ctx context.Context, res module.E return _c } -func (_c *ModuleDriver_Plan_Call) Return(_a0 *module.Plan, _a1 error) *ModuleDriver_Plan_Call { +func (_c *ModuleDriver_Plan_Call) Return(_a0 *resource.Resource, _a1 error) *ModuleDriver_Plan_Call { _c.Call.Return(_a0, _a1) return _c } +func (_c *ModuleDriver_Plan_Call) RunAndReturn(run func(context.Context, module.ExpandedResource, module.ActionRequest) (*resource.Resource, error)) *ModuleDriver_Plan_Call { + _c.Call.Return(run) + return _c +} + // Sync provides a mock function with given fields: ctx, res func (_m *ModuleDriver) Sync(ctx context.Context, res module.ExpandedResource) (*resource.State, error) { ret := _m.Called(ctx, res) + if len(ret) == 0 { + panic("no return value specified for Sync") + } + var r0 *resource.State + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) (*resource.State, error)); ok { + return rf(ctx, res) + } if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) *resource.State); ok { r0 = rf(ctx, res) } else { @@ -134,7 +166,6 @@ func (_m *ModuleDriver) Sync(ctx context.Context, res module.ExpandedResource) ( } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource) error); ok { r1 = rf(ctx, res) } else { @@ -150,8 +181,8 @@ type ModuleDriver_Sync_Call struct { } // Sync is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource +// - ctx context.Context +// - res module.ExpandedResource func (_e *ModuleDriver_Expecter) Sync(ctx interface{}, res interface{}) *ModuleDriver_Sync_Call { return &ModuleDriver_Sync_Call{Call: _e.mock.On("Sync", ctx, res)} } @@ -167,3 
+198,22 @@ func (_c *ModuleDriver_Sync_Call) Return(_a0 *resource.State, _a1 error) *Module _c.Call.Return(_a0, _a1) return _c } + +func (_c *ModuleDriver_Sync_Call) RunAndReturn(run func(context.Context, module.ExpandedResource) (*resource.State, error)) *ModuleDriver_Sync_Call { + _c.Call.Return(run) + return _c +} + +// NewModuleDriver creates a new instance of ModuleDriver. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewModuleDriver(t interface { + mock.TestingT + Cleanup(func()) +}) *ModuleDriver { + mock := &ModuleDriver{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/mocks/loggable_module.go b/core/mocks/loggable_module.go index edd56d97..952af3c3 100644 --- a/core/mocks/loggable_module.go +++ b/core/mocks/loggable_module.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks @@ -8,9 +8,9 @@ import ( mock "github.com/stretchr/testify/mock" - module "github.com/odpf/entropy/core/module" + module "github.com/goto/entropy/core/module" - resource "github.com/odpf/entropy/core/resource" + resource "github.com/goto/entropy/core/resource" ) // LoggableModule is an autogenerated mock type for the Loggable type @@ -30,7 +30,15 @@ func (_m *LoggableModule) EXPECT() *LoggableModule_Expecter { func (_m *LoggableModule) Log(ctx context.Context, res module.ExpandedResource, filter map[string]string) (<-chan module.LogChunk, error) { ret := _m.Called(ctx, res, filter) + if len(ret) == 0 { + panic("no return value specified for Log") + } + var r0 <-chan module.LogChunk + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, map[string]string) (<-chan module.LogChunk, error)); ok { + return rf(ctx, res, filter) + } if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, map[string]string) <-chan module.LogChunk); ok { r0 = rf(ctx, res, filter) } else { @@ -39,7 +47,6 @@ func (_m *LoggableModule) Log(ctx context.Context, res module.ExpandedResource, } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource, map[string]string) error); ok { r1 = rf(ctx, res, filter) } else { @@ -55,9 +62,9 @@ type LoggableModule_Log_Call struct { } // Log is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource -// - filter map[string]string +// - ctx context.Context +// - res module.ExpandedResource +// - filter map[string]string func (_e *LoggableModule_Expecter) Log(ctx interface{}, res interface{}, filter interface{}) *LoggableModule_Log_Call { return &LoggableModule_Log_Call{Call: _e.mock.On("Log", ctx, res, filter)} } @@ -74,11 +81,24 @@ func (_c *LoggableModule_Log_Call) Return(_a0 <-chan module.LogChunk, _a1 error) return _c } +func (_c *LoggableModule_Log_Call) RunAndReturn(run func(context.Context, module.ExpandedResource, map[string]string) (<-chan module.LogChunk, error)) *LoggableModule_Log_Call { + _c.Call.Return(run) + return _c +} + // Output provides a mock function with given fields: ctx, res func (_m *LoggableModule) Output(ctx context.Context, res module.ExpandedResource) (json.RawMessage, error) { ret := _m.Called(ctx, res) + if len(ret) == 0 { + panic("no return value specified for Output") + } + var r0 json.RawMessage + var r1 error + if 
rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) (json.RawMessage, error)); ok { + return rf(ctx, res) + } if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) json.RawMessage); ok { r0 = rf(ctx, res) } else { @@ -87,7 +107,6 @@ func (_m *LoggableModule) Output(ctx context.Context, res module.ExpandedResourc } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource) error); ok { r1 = rf(ctx, res) } else { @@ -103,8 +122,8 @@ type LoggableModule_Output_Call struct { } // Output is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource +// - ctx context.Context +// - res module.ExpandedResource func (_e *LoggableModule_Expecter) Output(ctx interface{}, res interface{}) *LoggableModule_Output_Call { return &LoggableModule_Output_Call{Call: _e.mock.On("Output", ctx, res)} } @@ -121,20 +140,32 @@ func (_c *LoggableModule_Output_Call) Return(_a0 json.RawMessage, _a1 error) *Lo return _c } +func (_c *LoggableModule_Output_Call) RunAndReturn(run func(context.Context, module.ExpandedResource) (json.RawMessage, error)) *LoggableModule_Output_Call { + _c.Call.Return(run) + return _c +} + // Plan provides a mock function with given fields: ctx, res, act -func (_m *LoggableModule) Plan(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*module.Plan, error) { +func (_m *LoggableModule) Plan(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { ret := _m.Called(ctx, res, act) - var r0 *module.Plan - if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, module.ActionRequest) *module.Plan); ok { + if len(ret) == 0 { + panic("no return value specified for Plan") + } + + var r0 *resource.Resource + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, module.ActionRequest) (*resource.Resource, error)); ok { + return rf(ctx, res, act) + } + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, module.ActionRequest) *resource.Resource); ok { r0 = rf(ctx, res, act) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*module.Plan) + r0 = ret.Get(0).(*resource.Resource) } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource, module.ActionRequest) error); ok { r1 = rf(ctx, res, act) } else { @@ -150,9 +181,9 @@ type LoggableModule_Plan_Call struct { } // Plan is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource -// - act module.ActionRequest +// - ctx context.Context +// - res module.ExpandedResource +// - act module.ActionRequest func (_e *LoggableModule_Expecter) Plan(ctx interface{}, res interface{}, act interface{}) *LoggableModule_Plan_Call { return &LoggableModule_Plan_Call{Call: _e.mock.On("Plan", ctx, res, act)} } @@ -164,16 +195,29 @@ func (_c *LoggableModule_Plan_Call) Run(run func(ctx context.Context, res module return _c } -func (_c *LoggableModule_Plan_Call) Return(_a0 *module.Plan, _a1 error) *LoggableModule_Plan_Call { +func (_c *LoggableModule_Plan_Call) Return(_a0 *resource.Resource, _a1 error) *LoggableModule_Plan_Call { _c.Call.Return(_a0, _a1) return _c } +func (_c *LoggableModule_Plan_Call) RunAndReturn(run func(context.Context, module.ExpandedResource, module.ActionRequest) (*resource.Resource, error)) *LoggableModule_Plan_Call { + _c.Call.Return(run) + return _c +} + // Sync provides a mock function with given fields: ctx, res func (_m *LoggableModule) 
Sync(ctx context.Context, res module.ExpandedResource) (*resource.State, error) { ret := _m.Called(ctx, res) + if len(ret) == 0 { + panic("no return value specified for Sync") + } + var r0 *resource.State + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) (*resource.State, error)); ok { + return rf(ctx, res) + } if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) *resource.State); ok { r0 = rf(ctx, res) } else { @@ -182,7 +226,6 @@ func (_m *LoggableModule) Sync(ctx context.Context, res module.ExpandedResource) } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource) error); ok { r1 = rf(ctx, res) } else { @@ -198,8 +241,8 @@ type LoggableModule_Sync_Call struct { } // Sync is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource +// - ctx context.Context +// - res module.ExpandedResource func (_e *LoggableModule_Expecter) Sync(ctx interface{}, res interface{}) *LoggableModule_Sync_Call { return &LoggableModule_Sync_Call{Call: _e.mock.On("Sync", ctx, res)} } @@ -215,3 +258,22 @@ func (_c *LoggableModule_Sync_Call) Return(_a0 *resource.State, _a1 error) *Logg _c.Call.Return(_a0, _a1) return _c } + +func (_c *LoggableModule_Sync_Call) RunAndReturn(run func(context.Context, module.ExpandedResource) (*resource.State, error)) *LoggableModule_Sync_Call { + _c.Call.Return(run) + return _c +} + +// NewLoggableModule creates a new instance of LoggableModule. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLoggableModule(t interface { + mock.TestingT + Cleanup(func()) +}) *LoggableModule { + mock := &LoggableModule{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/mocks/module_registry.go b/core/mocks/module_registry.go index a2a17f10..e2b9e445 100644 --- a/core/mocks/module_registry.go +++ b/core/mocks/module_registry.go @@ -1,11 +1,11 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. 
package mocks import ( context "context" - module "github.com/odpf/entropy/core/module" + module "github.com/goto/entropy/core/module" mock "github.com/stretchr/testify/mock" ) @@ -26,7 +26,16 @@ func (_m *ModuleRegistry) EXPECT() *ModuleRegistry_Expecter { func (_m *ModuleRegistry) GetDriver(ctx context.Context, mod module.Module) (module.Driver, module.Descriptor, error) { ret := _m.Called(ctx, mod) + if len(ret) == 0 { + panic("no return value specified for GetDriver") + } + var r0 module.Driver + var r1 module.Descriptor + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, module.Module) (module.Driver, module.Descriptor, error)); ok { + return rf(ctx, mod) + } if rf, ok := ret.Get(0).(func(context.Context, module.Module) module.Driver); ok { r0 = rf(ctx, mod) } else { @@ -35,14 +44,12 @@ func (_m *ModuleRegistry) GetDriver(ctx context.Context, mod module.Module) (mod } } - var r1 module.Descriptor if rf, ok := ret.Get(1).(func(context.Context, module.Module) module.Descriptor); ok { r1 = rf(ctx, mod) } else { r1 = ret.Get(1).(module.Descriptor) } - var r2 error if rf, ok := ret.Get(2).(func(context.Context, module.Module) error); ok { r2 = rf(ctx, mod) } else { @@ -58,8 +65,8 @@ type ModuleRegistry_GetDriver_Call struct { } // GetDriver is a helper method to define mock.On call -// - ctx context.Context -// - mod module.Module +// - ctx context.Context +// - mod module.Module func (_e *ModuleRegistry_Expecter) GetDriver(ctx interface{}, mod interface{}) *ModuleRegistry_GetDriver_Call { return &ModuleRegistry_GetDriver_Call{Call: _e.mock.On("GetDriver", ctx, mod)} } @@ -75,3 +82,22 @@ func (_c *ModuleRegistry_GetDriver_Call) Return(_a0 module.Driver, _a1 module.De _c.Call.Return(_a0, _a1, _a2) return _c } + +func (_c *ModuleRegistry_GetDriver_Call) RunAndReturn(run func(context.Context, module.Module) (module.Driver, module.Descriptor, error)) *ModuleRegistry_GetDriver_Call { + _c.Call.Return(run) + return _c +} + +// NewModuleRegistry creates a new instance of ModuleRegistry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewModuleRegistry(t interface { + mock.TestingT + Cleanup(func()) +}) *ModuleRegistry { + mock := &ModuleRegistry{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/mocks/module_service.go b/core/mocks/module_service.go index 2e5462e3..480fd4ab 100644 --- a/core/mocks/module_service.go +++ b/core/mocks/module_service.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. 
package mocks @@ -9,9 +9,9 @@ import ( mock "github.com/stretchr/testify/mock" - module "github.com/odpf/entropy/core/module" + module "github.com/goto/entropy/core/module" - resource "github.com/odpf/entropy/core/resource" + resource "github.com/goto/entropy/core/resource" ) // ModuleService is an autogenerated mock type for the ModuleService type @@ -31,7 +31,15 @@ func (_m *ModuleService) EXPECT() *ModuleService_Expecter { func (_m *ModuleService) GetOutput(ctx context.Context, res module.ExpandedResource) (json.RawMessage, error) { ret := _m.Called(ctx, res) + if len(ret) == 0 { + panic("no return value specified for GetOutput") + } + var r0 json.RawMessage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) (json.RawMessage, error)); ok { + return rf(ctx, res) + } if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) json.RawMessage); ok { r0 = rf(ctx, res) } else { @@ -40,7 +48,6 @@ func (_m *ModuleService) GetOutput(ctx context.Context, res module.ExpandedResou } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource) error); ok { r1 = rf(ctx, res) } else { @@ -56,8 +63,8 @@ type ModuleService_GetOutput_Call struct { } // GetOutput is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource +// - ctx context.Context +// - res module.ExpandedResource func (_e *ModuleService_Expecter) GetOutput(ctx interface{}, res interface{}) *ModuleService_GetOutput_Call { return &ModuleService_GetOutput_Call{Call: _e.mock.On("GetOutput", ctx, res)} } @@ -74,20 +81,32 @@ func (_c *ModuleService_GetOutput_Call) Return(_a0 json.RawMessage, _a1 error) * return _c } +func (_c *ModuleService_GetOutput_Call) RunAndReturn(run func(context.Context, module.ExpandedResource) (json.RawMessage, error)) *ModuleService_GetOutput_Call { + _c.Call.Return(run) + return _c +} + // PlanAction provides a mock function with given fields: ctx, res, act -func (_m *ModuleService) PlanAction(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*module.Plan, error) { +func (_m *ModuleService) PlanAction(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { ret := _m.Called(ctx, res, act) - var r0 *module.Plan - if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, module.ActionRequest) *module.Plan); ok { + if len(ret) == 0 { + panic("no return value specified for PlanAction") + } + + var r0 *resource.Resource + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, module.ActionRequest) (*resource.Resource, error)); ok { + return rf(ctx, res, act) + } + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, module.ActionRequest) *resource.Resource); ok { r0 = rf(ctx, res, act) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*module.Plan) + r0 = ret.Get(0).(*resource.Resource) } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource, module.ActionRequest) error); ok { r1 = rf(ctx, res, act) } else { @@ -103,9 +122,9 @@ type ModuleService_PlanAction_Call struct { } // PlanAction is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource -// - act module.ActionRequest +// - ctx context.Context +// - res module.ExpandedResource +// - act module.ActionRequest func (_e *ModuleService_Expecter) PlanAction(ctx 
interface{}, res interface{}, act interface{}) *ModuleService_PlanAction_Call { return &ModuleService_PlanAction_Call{Call: _e.mock.On("PlanAction", ctx, res, act)} } @@ -117,16 +136,29 @@ func (_c *ModuleService_PlanAction_Call) Run(run func(ctx context.Context, res m return _c } -func (_c *ModuleService_PlanAction_Call) Return(_a0 *module.Plan, _a1 error) *ModuleService_PlanAction_Call { +func (_c *ModuleService_PlanAction_Call) Return(_a0 *resource.Resource, _a1 error) *ModuleService_PlanAction_Call { _c.Call.Return(_a0, _a1) return _c } +func (_c *ModuleService_PlanAction_Call) RunAndReturn(run func(context.Context, module.ExpandedResource, module.ActionRequest) (*resource.Resource, error)) *ModuleService_PlanAction_Call { + _c.Call.Return(run) + return _c +} + // StreamLogs provides a mock function with given fields: ctx, res, filter func (_m *ModuleService) StreamLogs(ctx context.Context, res module.ExpandedResource, filter map[string]string) (<-chan module.LogChunk, error) { ret := _m.Called(ctx, res, filter) + if len(ret) == 0 { + panic("no return value specified for StreamLogs") + } + var r0 <-chan module.LogChunk + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, map[string]string) (<-chan module.LogChunk, error)); ok { + return rf(ctx, res, filter) + } if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource, map[string]string) <-chan module.LogChunk); ok { r0 = rf(ctx, res, filter) } else { @@ -135,7 +167,6 @@ func (_m *ModuleService) StreamLogs(ctx context.Context, res module.ExpandedReso } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource, map[string]string) error); ok { r1 = rf(ctx, res, filter) } else { @@ -151,9 +182,9 @@ type ModuleService_StreamLogs_Call struct { } // StreamLogs is a helper method to define mock.On call -// - ctx context.Context -// - res module.ExpandedResource -// - filter map[string]string +// - ctx context.Context +// - res module.ExpandedResource +// - filter map[string]string func (_e *ModuleService_Expecter) StreamLogs(ctx interface{}, res interface{}, filter interface{}) *ModuleService_StreamLogs_Call { return &ModuleService_StreamLogs_Call{Call: _e.mock.On("StreamLogs", ctx, res, filter)} } @@ -170,11 +201,24 @@ func (_c *ModuleService_StreamLogs_Call) Return(_a0 <-chan module.LogChunk, _a1 return _c } +func (_c *ModuleService_StreamLogs_Call) RunAndReturn(run func(context.Context, module.ExpandedResource, map[string]string) (<-chan module.LogChunk, error)) *ModuleService_StreamLogs_Call { + _c.Call.Return(run) + return _c +} + // SyncState provides a mock function with given fields: ctx, res func (_m *ModuleService) SyncState(ctx context.Context, res module.ExpandedResource) (*resource.State, error) { ret := _m.Called(ctx, res) + if len(ret) == 0 { + panic("no return value specified for SyncState") + } + var r0 *resource.State + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) (*resource.State, error)); ok { + return rf(ctx, res) + } if rf, ok := ret.Get(0).(func(context.Context, module.ExpandedResource) *resource.State); ok { r0 = rf(ctx, res) } else { @@ -183,7 +227,6 @@ func (_m *ModuleService) SyncState(ctx context.Context, res module.ExpandedResou } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.ExpandedResource) error); ok { r1 = rf(ctx, res) } else { @@ -199,8 +242,8 @@ type ModuleService_SyncState_Call struct { } // SyncState is a helper method to define mock.On call -// - ctx 
context.Context -// - res module.ExpandedResource +// - ctx context.Context +// - res module.ExpandedResource func (_e *ModuleService_Expecter) SyncState(ctx interface{}, res interface{}) *ModuleService_SyncState_Call { return &ModuleService_SyncState_Call{Call: _e.mock.On("SyncState", ctx, res)} } @@ -216,3 +259,22 @@ func (_c *ModuleService_SyncState_Call) Return(_a0 *resource.State, _a1 error) * _c.Call.Return(_a0, _a1) return _c } + +func (_c *ModuleService_SyncState_Call) RunAndReturn(run func(context.Context, module.ExpandedResource) (*resource.State, error)) *ModuleService_SyncState_Call { + _c.Call.Return(run) + return _c +} + +// NewModuleService creates a new instance of ModuleService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewModuleService(t interface { + mock.TestingT + Cleanup(func()) +}) *ModuleService { + mock := &ModuleService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/mocks/module_store.go b/core/mocks/module_store.go index 3b1cbbf5..1846e05a 100644 --- a/core/mocks/module_store.go +++ b/core/mocks/module_store.go @@ -1,11 +1,11 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks import ( context "context" - module "github.com/odpf/entropy/core/module" + module "github.com/goto/entropy/core/module" mock "github.com/stretchr/testify/mock" ) @@ -26,6 +26,10 @@ func (_m *ModuleStore) EXPECT() *ModuleStore_Expecter { func (_m *ModuleStore) CreateModule(ctx context.Context, m module.Module) error { ret := _m.Called(ctx, m) + if len(ret) == 0 { + panic("no return value specified for CreateModule") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, module.Module) error); ok { r0 = rf(ctx, m) @@ -42,8 +46,8 @@ type ModuleStore_CreateModule_Call struct { } // CreateModule is a helper method to define mock.On call -// - ctx context.Context -// - m module.Module +// - ctx context.Context +// - m module.Module func (_e *ModuleStore_Expecter) CreateModule(ctx interface{}, m interface{}) *ModuleStore_CreateModule_Call { return &ModuleStore_CreateModule_Call{Call: _e.mock.On("CreateModule", ctx, m)} } @@ -60,10 +64,19 @@ func (_c *ModuleStore_CreateModule_Call) Return(_a0 error) *ModuleStore_CreateMo return _c } +func (_c *ModuleStore_CreateModule_Call) RunAndReturn(run func(context.Context, module.Module) error) *ModuleStore_CreateModule_Call { + _c.Call.Return(run) + return _c +} + // DeleteModule provides a mock function with given fields: ctx, urn func (_m *ModuleStore) DeleteModule(ctx context.Context, urn string) error { ret := _m.Called(ctx, urn) + if len(ret) == 0 { + panic("no return value specified for DeleteModule") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, urn) @@ -80,8 +93,8 @@ type ModuleStore_DeleteModule_Call struct { } // DeleteModule is a helper method to define mock.On call -// - ctx context.Context -// - urn string +// - ctx context.Context +// - urn string func (_e *ModuleStore_Expecter) DeleteModule(ctx interface{}, urn interface{}) *ModuleStore_DeleteModule_Call { return &ModuleStore_DeleteModule_Call{Call: _e.mock.On("DeleteModule", ctx, urn)} } @@ -98,11 +111,24 @@ func (_c *ModuleStore_DeleteModule_Call) Return(_a0 error) *ModuleStore_DeleteMo return _c } +func (_c 
*ModuleStore_DeleteModule_Call) RunAndReturn(run func(context.Context, string) error) *ModuleStore_DeleteModule_Call { + _c.Call.Return(run) + return _c +} + // GetModule provides a mock function with given fields: ctx, urn func (_m *ModuleStore) GetModule(ctx context.Context, urn string) (*module.Module, error) { ret := _m.Called(ctx, urn) + if len(ret) == 0 { + panic("no return value specified for GetModule") + } + var r0 *module.Module + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*module.Module, error)); ok { + return rf(ctx, urn) + } if rf, ok := ret.Get(0).(func(context.Context, string) *module.Module); ok { r0 = rf(ctx, urn) } else { @@ -111,7 +137,6 @@ func (_m *ModuleStore) GetModule(ctx context.Context, urn string) (*module.Modul } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, urn) } else { @@ -127,8 +152,8 @@ type ModuleStore_GetModule_Call struct { } // GetModule is a helper method to define mock.On call -// - ctx context.Context -// - urn string +// - ctx context.Context +// - urn string func (_e *ModuleStore_Expecter) GetModule(ctx interface{}, urn interface{}) *ModuleStore_GetModule_Call { return &ModuleStore_GetModule_Call{Call: _e.mock.On("GetModule", ctx, urn)} } @@ -145,11 +170,24 @@ func (_c *ModuleStore_GetModule_Call) Return(_a0 *module.Module, _a1 error) *Mod return _c } +func (_c *ModuleStore_GetModule_Call) RunAndReturn(run func(context.Context, string) (*module.Module, error)) *ModuleStore_GetModule_Call { + _c.Call.Return(run) + return _c +} + // ListModules provides a mock function with given fields: ctx, project func (_m *ModuleStore) ListModules(ctx context.Context, project string) ([]module.Module, error) { ret := _m.Called(ctx, project) + if len(ret) == 0 { + panic("no return value specified for ListModules") + } + var r0 []module.Module + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]module.Module, error)); ok { + return rf(ctx, project) + } if rf, ok := ret.Get(0).(func(context.Context, string) []module.Module); ok { r0 = rf(ctx, project) } else { @@ -158,7 +196,6 @@ func (_m *ModuleStore) ListModules(ctx context.Context, project string) ([]modul } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, project) } else { @@ -174,8 +211,8 @@ type ModuleStore_ListModules_Call struct { } // ListModules is a helper method to define mock.On call -// - ctx context.Context -// - project string +// - ctx context.Context +// - project string func (_e *ModuleStore_Expecter) ListModules(ctx interface{}, project interface{}) *ModuleStore_ListModules_Call { return &ModuleStore_ListModules_Call{Call: _e.mock.On("ListModules", ctx, project)} } @@ -192,10 +229,19 @@ func (_c *ModuleStore_ListModules_Call) Return(_a0 []module.Module, _a1 error) * return _c } +func (_c *ModuleStore_ListModules_Call) RunAndReturn(run func(context.Context, string) ([]module.Module, error)) *ModuleStore_ListModules_Call { + _c.Call.Return(run) + return _c +} + // UpdateModule provides a mock function with given fields: ctx, m func (_m *ModuleStore) UpdateModule(ctx context.Context, m module.Module) error { ret := _m.Called(ctx, m) + if len(ret) == 0 { + panic("no return value specified for UpdateModule") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, module.Module) error); ok { r0 = rf(ctx, m) @@ -212,8 +258,8 @@ type ModuleStore_UpdateModule_Call struct { } // UpdateModule is a helper method to define mock.On call -// - ctx 
context.Context -// - m module.Module +// - ctx context.Context +// - m module.Module func (_e *ModuleStore_Expecter) UpdateModule(ctx interface{}, m interface{}) *ModuleStore_UpdateModule_Call { return &ModuleStore_UpdateModule_Call{Call: _e.mock.On("UpdateModule", ctx, m)} } @@ -229,3 +275,22 @@ func (_c *ModuleStore_UpdateModule_Call) Return(_a0 error) *ModuleStore_UpdateMo _c.Call.Return(_a0) return _c } + +func (_c *ModuleStore_UpdateModule_Call) RunAndReturn(run func(context.Context, module.Module) error) *ModuleStore_UpdateModule_Call { + _c.Call.Return(run) + return _c +} + +// NewModuleStore creates a new instance of ModuleStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewModuleStore(t interface { + mock.TestingT + Cleanup(func()) +}) *ModuleStore { + mock := &ModuleStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/mocks/resource_store.go b/core/mocks/resource_store.go index b8d88731..df3aebe5 100644 --- a/core/mocks/resource_store.go +++ b/core/mocks/resource_store.go @@ -1,11 +1,11 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks import ( context "context" - resource "github.com/odpf/entropy/core/resource" + resource "github.com/goto/entropy/core/resource" mock "github.com/stretchr/testify/mock" ) @@ -33,6 +33,10 @@ func (_m *ResourceStore) Create(ctx context.Context, r resource.Resource, hooks _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for Create") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, resource.Resource, ...resource.MutationHook) error); ok { r0 = rf(ctx, r, hooks...) @@ -49,9 +53,9 @@ type ResourceStore_Create_Call struct { } // Create is a helper method to define mock.On call -// - ctx context.Context -// - r resource.Resource -// - hooks ...resource.MutationHook +// - ctx context.Context +// - r resource.Resource +// - hooks ...resource.MutationHook func (_e *ResourceStore_Expecter) Create(ctx interface{}, r interface{}, hooks ...interface{}) *ResourceStore_Create_Call { return &ResourceStore_Create_Call{Call: _e.mock.On("Create", append([]interface{}{ctx, r}, hooks...)...)} @@ -75,6 +79,11 @@ func (_c *ResourceStore_Create_Call) Return(_a0 error) *ResourceStore_Create_Cal return _c } +func (_c *ResourceStore_Create_Call) RunAndReturn(run func(context.Context, resource.Resource, ...resource.MutationHook) error) *ResourceStore_Create_Call { + _c.Call.Return(run) + return _c +} + // Delete provides a mock function with given fields: ctx, urn, hooks func (_m *ResourceStore) Delete(ctx context.Context, urn string, hooks ...resource.MutationHook) error { _va := make([]interface{}, len(hooks)) @@ -86,6 +95,10 @@ func (_m *ResourceStore) Delete(ctx context.Context, urn string, hooks ...resour _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for Delete") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, ...resource.MutationHook) error); ok { r0 = rf(ctx, urn, hooks...) 
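The regenerated mocks above add typed `RunAndReturn` helpers and per-mock constructors (`NewModuleStore`, `NewResourceStore`, ...) that register `AssertExpectations` via `t.Cleanup`. A usage sketch against the `ModuleStore` mock, assuming `module.Module` exposes a `URN` field as elsewhere in the codebase:

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"

	"github.com/goto/entropy/core/mocks"
	"github.com/goto/entropy/core/module"
)

func TestGetModuleStub(t *testing.T) {
	// The generated constructor wires AssertExpectations via t.Cleanup.
	store := mocks.NewModuleStore(t)

	store.EXPECT().
		GetModule(mock.Anything, "orn:entropy:module:demo:firehose").
		RunAndReturn(func(ctx context.Context, urn string) (*module.Module, error) {
			// RunAndReturn computes the result from the actual arguments.
			return &module.Module{URN: urn}, nil
		}).
		Once()

	got, err := store.GetModule(context.Background(), "orn:entropy:module:demo:firehose")
	assert.NoError(t, err)
	assert.Equal(t, "orn:entropy:module:demo:firehose", got.URN)
}
```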
@@ -102,9 +115,9 @@ type ResourceStore_Delete_Call struct { } // Delete is a helper method to define mock.On call -// - ctx context.Context -// - urn string -// - hooks ...resource.MutationHook +// - ctx context.Context +// - urn string +// - hooks ...resource.MutationHook func (_e *ResourceStore_Expecter) Delete(ctx interface{}, urn interface{}, hooks ...interface{}) *ResourceStore_Delete_Call { return &ResourceStore_Delete_Call{Call: _e.mock.On("Delete", append([]interface{}{ctx, urn}, hooks...)...)} @@ -128,11 +141,24 @@ func (_c *ResourceStore_Delete_Call) Return(_a0 error) *ResourceStore_Delete_Cal return _c } +func (_c *ResourceStore_Delete_Call) RunAndReturn(run func(context.Context, string, ...resource.MutationHook) error) *ResourceStore_Delete_Call { + _c.Call.Return(run) + return _c +} + // GetByURN provides a mock function with given fields: ctx, urn func (_m *ResourceStore) GetByURN(ctx context.Context, urn string) (*resource.Resource, error) { ret := _m.Called(ctx, urn) + if len(ret) == 0 { + panic("no return value specified for GetByURN") + } + var r0 *resource.Resource + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*resource.Resource, error)); ok { + return rf(ctx, urn) + } if rf, ok := ret.Get(0).(func(context.Context, string) *resource.Resource); ok { r0 = rf(ctx, urn) } else { @@ -141,7 +167,6 @@ func (_m *ResourceStore) GetByURN(ctx context.Context, urn string) (*resource.Re } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, urn) } else { @@ -157,8 +182,8 @@ type ResourceStore_GetByURN_Call struct { } // GetByURN is a helper method to define mock.On call -// - ctx context.Context -// - urn string +// - ctx context.Context +// - urn string func (_e *ResourceStore_Expecter) GetByURN(ctx interface{}, urn interface{}) *ResourceStore_GetByURN_Call { return &ResourceStore_GetByURN_Call{Call: _e.mock.On("GetByURN", ctx, urn)} } @@ -175,22 +200,34 @@ func (_c *ResourceStore_GetByURN_Call) Return(_a0 *resource.Resource, _a1 error) return _c } -// List provides a mock function with given fields: ctx, filter -func (_m *ResourceStore) List(ctx context.Context, filter resource.Filter) ([]resource.Resource, error) { - ret := _m.Called(ctx, filter) +func (_c *ResourceStore_GetByURN_Call) RunAndReturn(run func(context.Context, string) (*resource.Resource, error)) *ResourceStore_GetByURN_Call { + _c.Call.Return(run) + return _c +} + +// List provides a mock function with given fields: ctx, filter, withSpecConfigs +func (_m *ResourceStore) List(ctx context.Context, filter resource.Filter, withSpecConfigs bool) ([]resource.Resource, error) { + ret := _m.Called(ctx, filter, withSpecConfigs) + + if len(ret) == 0 { + panic("no return value specified for List") + } var r0 []resource.Resource - if rf, ok := ret.Get(0).(func(context.Context, resource.Filter) []resource.Resource); ok { - r0 = rf(ctx, filter) + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, resource.Filter, bool) ([]resource.Resource, error)); ok { + return rf(ctx, filter, withSpecConfigs) + } + if rf, ok := ret.Get(0).(func(context.Context, resource.Filter, bool) []resource.Resource); ok { + r0 = rf(ctx, filter, withSpecConfigs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]resource.Resource) } } - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, resource.Filter) error); ok { - r1 = rf(ctx, filter) + if rf, ok := ret.Get(1).(func(context.Context, resource.Filter, bool) error); ok { + r1 = rf(ctx, filter, withSpecConfigs) } 
else { r1 = ret.Error(1) } @@ -204,15 +241,16 @@ type ResourceStore_List_Call struct { } // List is a helper method to define mock.On call -// - ctx context.Context -// - filter resource.Filter -func (_e *ResourceStore_Expecter) List(ctx interface{}, filter interface{}) *ResourceStore_List_Call { - return &ResourceStore_List_Call{Call: _e.mock.On("List", ctx, filter)} +// - ctx context.Context +// - filter resource.Filter +// - withSpecConfigs bool +func (_e *ResourceStore_Expecter) List(ctx interface{}, filter interface{}, withSpecConfigs interface{}) *ResourceStore_List_Call { + return &ResourceStore_List_Call{Call: _e.mock.On("List", ctx, filter, withSpecConfigs)} } -func (_c *ResourceStore_List_Call) Run(run func(ctx context.Context, filter resource.Filter)) *ResourceStore_List_Call { +func (_c *ResourceStore_List_Call) Run(run func(ctx context.Context, filter resource.Filter, withSpecConfigs bool)) *ResourceStore_List_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(resource.Filter)) + run(args[0].(context.Context), args[1].(resource.Filter), args[2].(bool)) }) return _c } @@ -222,11 +260,24 @@ func (_c *ResourceStore_List_Call) Return(_a0 []resource.Resource, _a1 error) *R return _c } +func (_c *ResourceStore_List_Call) RunAndReturn(run func(context.Context, resource.Filter, bool) ([]resource.Resource, error)) *ResourceStore_List_Call { + _c.Call.Return(run) + return _c +} + // Revisions provides a mock function with given fields: ctx, selector func (_m *ResourceStore) Revisions(ctx context.Context, selector resource.RevisionsSelector) ([]resource.Revision, error) { ret := _m.Called(ctx, selector) + if len(ret) == 0 { + panic("no return value specified for Revisions") + } + var r0 []resource.Revision + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, resource.RevisionsSelector) ([]resource.Revision, error)); ok { + return rf(ctx, selector) + } if rf, ok := ret.Get(0).(func(context.Context, resource.RevisionsSelector) []resource.Revision); ok { r0 = rf(ctx, selector) } else { @@ -235,7 +286,6 @@ func (_m *ResourceStore) Revisions(ctx context.Context, selector resource.Revisi } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, resource.RevisionsSelector) error); ok { r1 = rf(ctx, selector) } else { @@ -251,8 +301,8 @@ type ResourceStore_Revisions_Call struct { } // Revisions is a helper method to define mock.On call -// - ctx context.Context -// - selector resource.RevisionsSelector +// - ctx context.Context +// - selector resource.RevisionsSelector func (_e *ResourceStore_Expecter) Revisions(ctx interface{}, selector interface{}) *ResourceStore_Revisions_Call { return &ResourceStore_Revisions_Call{Call: _e.mock.On("Revisions", ctx, selector)} } @@ -269,6 +319,59 @@ func (_c *ResourceStore_Revisions_Call) Return(_a0 []resource.Revision, _a1 erro return _c } +func (_c *ResourceStore_Revisions_Call) RunAndReturn(run func(context.Context, resource.RevisionsSelector) ([]resource.Revision, error)) *ResourceStore_Revisions_Call { + _c.Call.Return(run) + return _c +} + +// SyncOne provides a mock function with given fields: ctx, scope, syncFn +func (_m *ResourceStore) SyncOne(ctx context.Context, scope map[string][]string, syncFn resource.SyncFn) error { + ret := _m.Called(ctx, scope, syncFn) + + if len(ret) == 0 { + panic("no return value specified for SyncOne") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, map[string][]string, resource.SyncFn) error); ok { + r0 = rf(ctx, scope, syncFn) + } else { + r0 
= ret.Error(0) + } + + return r0 +} + +// ResourceStore_SyncOne_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SyncOne' +type ResourceStore_SyncOne_Call struct { + *mock.Call +} + +// SyncOne is a helper method to define mock.On call +// - ctx context.Context +// - scope map[string][]string +// - syncFn resource.SyncFn +func (_e *ResourceStore_Expecter) SyncOne(ctx interface{}, scope interface{}, syncFn interface{}) *ResourceStore_SyncOne_Call { + return &ResourceStore_SyncOne_Call{Call: _e.mock.On("SyncOne", ctx, scope, syncFn)} +} + +func (_c *ResourceStore_SyncOne_Call) Run(run func(ctx context.Context, scope map[string][]string, syncFn resource.SyncFn)) *ResourceStore_SyncOne_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(map[string][]string), args[2].(resource.SyncFn)) + }) + return _c +} + +func (_c *ResourceStore_SyncOne_Call) Return(_a0 error) *ResourceStore_SyncOne_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ResourceStore_SyncOne_Call) RunAndReturn(run func(context.Context, map[string][]string, resource.SyncFn) error) *ResourceStore_SyncOne_Call { + _c.Call.Return(run) + return _c +} + // Update provides a mock function with given fields: ctx, r, saveRevision, reason, hooks func (_m *ResourceStore) Update(ctx context.Context, r resource.Resource, saveRevision bool, reason string, hooks ...resource.MutationHook) error { _va := make([]interface{}, len(hooks)) @@ -280,6 +383,10 @@ func (_m *ResourceStore) Update(ctx context.Context, r resource.Resource, saveRe _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for Update") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, resource.Resource, bool, string, ...resource.MutationHook) error); ok { r0 = rf(ctx, r, saveRevision, reason, hooks...) @@ -296,11 +403,11 @@ type ResourceStore_Update_Call struct { } // Update is a helper method to define mock.On call -// - ctx context.Context -// - r resource.Resource -// - saveRevision bool -// - reason string -// - hooks ...resource.MutationHook +// - ctx context.Context +// - r resource.Resource +// - saveRevision bool +// - reason string +// - hooks ...resource.MutationHook func (_e *ResourceStore_Expecter) Update(ctx interface{}, r interface{}, saveRevision interface{}, reason interface{}, hooks ...interface{}) *ResourceStore_Update_Call { return &ResourceStore_Update_Call{Call: _e.mock.On("Update", append([]interface{}{ctx, r, saveRevision, reason}, hooks...)...)} @@ -323,3 +430,22 @@ func (_c *ResourceStore_Update_Call) Return(_a0 error) *ResourceStore_Update_Cal _c.Call.Return(_a0) return _c } + +func (_c *ResourceStore_Update_Call) RunAndReturn(run func(context.Context, resource.Resource, bool, string, ...resource.MutationHook) error) *ResourceStore_Update_Call { + _c.Call.Return(run) + return _c +} + +// NewResourceStore creates a new instance of ResourceStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewResourceStore(t interface { + mock.TestingT + Cleanup(func()) +}) *ResourceStore { + mock := &ResourceStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/module/action.go b/core/module/action.go index a2fc611a..3cc964a2 100644 --- a/core/module/action.go +++ b/core/module/action.go @@ -6,7 +6,7 @@ import ( "github.com/xeipuuv/gojsonschema" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" ) const ( @@ -20,6 +20,7 @@ type ActionRequest struct { Name string `json:"name"` Params json.RawMessage `json:"params"` Labels map[string]string `json:"labels"` + UserID string } // ActionDesc is a descriptor for an action supported by a module. diff --git a/core/module/driver.go b/core/module/driver.go index f52be7e7..cdcd86da 100644 --- a/core/module/driver.go +++ b/core/module/driver.go @@ -6,9 +6,8 @@ package module import ( "context" "encoding/json" - "time" - "github.com/odpf/entropy/core/resource" + "github.com/goto/entropy/core/resource" ) // Driver is responsible for achieving desired external system states based @@ -17,7 +16,7 @@ type Driver interface { // Plan SHOULD validate the action on the current version of the resource, // return the resource with config/status/state changes (if any) applied. // Plan SHOULD NOT have side effects on anything other than the resource. - Plan(ctx context.Context, res ExpandedResource, act ActionRequest) (*Plan, error) + Plan(ctx context.Context, res ExpandedResource, act ActionRequest) (*resource.Resource, error) // Sync is called repeatedly by Entropy core until the returned state is // a terminal status. Driver implementation is free to execute an action @@ -32,13 +31,6 @@ type Driver interface { Output(ctx context.Context, res ExpandedResource) (json.RawMessage, error) } -// Plan represents the changes to be staged and later synced by module. -type Plan struct { - Resource resource.Resource - ScheduleRunAt time.Time - Reason string -} - // Loggable extension of driver allows streaming log data for a resource. type Loggable interface { Driver diff --git a/core/module/module.go b/core/module/module.go index f5fa1e93..a5611be9 100644 --- a/core/module/module.go +++ b/core/module/module.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" ) // Module represents all the data needed to initialize a particular module. 
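With `module.Plan` removed, `Driver.Plan` now returns the planned `*resource.Resource` directly. A do-nothing driver skeleton against the interface shown above (package name and behaviour are illustrative; a real driver stages changes in `Plan` and reconciles toward a terminal state in `Sync`):

```go
package demo

import (
	"context"
	"encoding/json"

	"github.com/goto/entropy/core/module"
	"github.com/goto/entropy/core/resource"
)

// driver is a minimal implementation of module.Driver under the new
// signatures, where Plan returns the planned resource directly instead of
// the removed module.Plan wrapper.
type driver struct{}

func (driver) Plan(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) {
	planned := res.Resource
	// A real driver would validate act.Params (and act.UserID, newly added
	// to ActionRequest) and stage config/state changes on `planned` here.
	return &planned, nil
}

func (driver) Sync(ctx context.Context, res module.ExpandedResource) (*resource.State, error) {
	// A real driver would perform one reconciliation step and return the
	// resulting (possibly still non-terminal) state.
	st := res.Resource.State
	return &st, nil
}

func (driver) Output(ctx context.Context, res module.ExpandedResource) (json.RawMessage, error) {
	return res.Resource.State.Output, nil
}
```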
diff --git a/core/module/service.go b/core/module/service.go index da9257e0..d2c5ddef 100644 --- a/core/module/service.go +++ b/core/module/service.go @@ -5,8 +5,8 @@ import ( "encoding/json" "fmt" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" ) type Service struct { @@ -21,7 +21,7 @@ func NewService(registry Registry, store Store) *Service { } } -func (mr *Service) PlanAction(ctx context.Context, res ExpandedResource, act ActionRequest) (*Plan, error) { +func (mr *Service) PlanAction(ctx context.Context, res ExpandedResource, act ActionRequest) (*resource.Resource, error) { mod, err := mr.discoverModule(ctx, res.Kind, res.Project) if err != nil { return nil, err diff --git a/core/read.go b/core/read.go index 0ab96e08..36f67060 100644 --- a/core/read.go +++ b/core/read.go @@ -1,15 +1,16 @@ package core import ( + "bytes" "context" - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" ) -func (s *Service) GetResource(ctx context.Context, urn string) (*resource.Resource, error) { - res, err := s.store.GetByURN(ctx, urn) +func (svc *Service) GetResource(ctx context.Context, urn string) (*resource.Resource, error) { + res, err := svc.store.GetByURN(ctx, urn) if err != nil { if errors.Is(err, errors.ErrNotFound) { return nil, errors.ErrNotFound.WithMsgf("resource with urn '%s' not found", urn) @@ -17,41 +18,52 @@ func (s *Service) GetResource(ctx context.Context, urn string) (*resource.Resour return nil, errors.ErrInternal.WithCausef(err.Error()) } - modSpec, err := s.generateModuleSpec(ctx, *res) + modSpec, err := svc.generateModuleSpec(ctx, *res) if err != nil { return nil, err } - output, err := s.moduleSvc.GetOutput(ctx, *modSpec) + output, err := svc.moduleSvc.GetOutput(ctx, *modSpec) if err != nil { return nil, err } - res.State.Output = output + if !bytes.Equal(res.State.Output, output) { + res.State.Output = output + err = svc.store.Update(ctx, *res, false, "") + if err != nil { + return nil, err + } + } return res, nil } -func (s *Service) ListResources(ctx context.Context, filter resource.Filter) ([]resource.Resource, error) { - resources, err := s.store.List(ctx, filter) +func (svc *Service) ListResources(ctx context.Context, filter resource.Filter, withSpecConfigs bool) (resource.PagedResource, error) { + resources, err := svc.store.List(ctx, filter, withSpecConfigs) if err != nil { - return nil, errors.ErrInternal.WithCausef(err.Error()) + return resource.PagedResource{}, errors.ErrInternal.WithCausef(err.Error()) } - return filter.Apply(resources), nil + + resources = filter.Apply(resources) + return resource.PagedResource{ + Count: int32(len(resources)), + Resources: resources, + }, nil } -func (s *Service) GetLog(ctx context.Context, urn string, filter map[string]string) (<-chan module.LogChunk, error) { - res, err := s.GetResource(ctx, urn) +func (svc *Service) GetLog(ctx context.Context, urn string, filter map[string]string) (<-chan module.LogChunk, error) { + res, err := svc.GetResource(ctx, urn) if err != nil { return nil, err } - modSpec, err := s.generateModuleSpec(ctx, *res) + modSpec, err := svc.generateModuleSpec(ctx, *res) if err != 
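`GetResource` above now refreshes the stored output on read: it recomputes the module output and calls `store.Update` only when `bytes.Equal` reports a difference, and `ListResources` now wraps its results in `resource.PagedResource{Count, Resources}`. A toy sketch of the refresh-on-read guard, with an in-memory map standing in for the resource store and module output:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type record struct {
	URN    string
	Output json.RawMessage
}

// store and computeOutput are stand-ins for resource.Store and
// ModuleService.GetOutput.
var store = map[string]*record{
	"orn:demo": {URN: "orn:demo", Output: json.RawMessage(`{"replicas":1}`)},
}

func computeOutput(urn string) json.RawMessage {
	return json.RawMessage(`{"replicas":2}`)
}

func getResource(urn string) *record {
	res := store[urn]
	fresh := computeOutput(urn)
	if !bytes.Equal(res.Output, fresh) {
		// Persist only when the output actually changed, mirroring the
		// bytes.Equal guard added to GetResource above.
		res.Output = fresh
	}
	return res
}

func main() {
	fmt.Println(string(getResource("orn:demo").Output)) // {"replicas":2}
}
```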
nil { return nil, err } - logCh, err := s.moduleSvc.StreamLogs(ctx, *modSpec, filter) + logCh, err := svc.moduleSvc.StreamLogs(ctx, *modSpec, filter) if err != nil { if errors.Is(err, errors.ErrUnsupported) { return nil, errors.ErrUnsupported.WithMsgf("log streaming not supported for kind '%s'", res.Kind) @@ -61,8 +73,8 @@ func (s *Service) GetLog(ctx context.Context, urn string, filter map[string]stri return logCh, nil } -func (s *Service) GetRevisions(ctx context.Context, selector resource.RevisionsSelector) ([]resource.Revision, error) { - revs, err := s.store.Revisions(ctx, selector) +func (svc *Service) GetRevisions(ctx context.Context, selector resource.RevisionsSelector) ([]resource.Revision, error) { + revs, err := svc.store.Revisions(ctx, selector) if err != nil { return nil, errors.ErrInternal.WithCausef(err.Error()) } diff --git a/core/read_test.go b/core/read_test.go index 0c79bd16..660d46f4 100644 --- a/core/read_test.go +++ b/core/read_test.go @@ -3,14 +3,21 @@ package core_test import ( "context" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "github.com/odpf/entropy/core" - "github.com/odpf/entropy/core/mocks" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core" + "github.com/goto/entropy/core/mocks" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" +) + +const ( + defaultMaxRetries = 5 + defaultSyncBackoff = 5 * time.Second + serviceName = "test-service" ) func TestService_GetResource(t *testing.T) { @@ -32,7 +39,7 @@ func TestService_GetResource(t *testing.T) { GetByURN(mock.Anything, mock.Anything). Return(nil, errors.ErrNotFound). Once() - return core.New(repo, nil, &mocks.AsyncWorker{}, nil, nil) + return core.New(repo, nil, nil, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "foo:bar:baz", wantErr: errors.ErrNotFound, @@ -52,7 +59,7 @@ func TestService_GetResource(t *testing.T) { Return(nil, nil). Once() - return core.New(repo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(repo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "foo:bar:baz", want: &sampleResource, @@ -87,7 +94,7 @@ func TestService_ListResources(t *testing.T) { name string setup func(t *testing.T) *core.Service filter resource.Filter - want []resource.Resource + want resource.PagedResource wantErr error }{ { @@ -96,12 +103,12 @@ func TestService_ListResources(t *testing.T) { t.Helper() repo := &mocks.ResourceStore{} repo.EXPECT(). - List(mock.Anything, mock.Anything). + List(mock.Anything, mock.Anything, false). Return(nil, nil). Once() - return core.New(repo, nil, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(repo, nil, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, - want: nil, + want: resource.PagedResource{}, wantErr: nil, }, { @@ -110,12 +117,12 @@ func TestService_ListResources(t *testing.T) { t.Helper() repo := &mocks.ResourceStore{} repo.EXPECT(). - List(mock.Anything, mock.Anything). + List(mock.Anything, mock.Anything, false). Return(nil, errStoreFailure). 
Once() - return core.New(repo, nil, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(repo, nil, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, - want: nil, + want: resource.PagedResource{}, wantErr: errors.ErrInternal, }, { @@ -124,12 +131,15 @@ func TestService_ListResources(t *testing.T) { t.Helper() repo := &mocks.ResourceStore{} repo.EXPECT(). - List(mock.Anything, mock.Anything). + List(mock.Anything, mock.Anything, false). Return([]resource.Resource{sampleResource}, nil). Once() - return core.New(repo, nil, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(repo, nil, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) + }, + want: resource.PagedResource{ + Count: 1, + Resources: []resource.Resource{sampleResource}, }, - want: []resource.Resource{sampleResource}, wantErr: nil, }, } @@ -140,7 +150,7 @@ func TestService_ListResources(t *testing.T) { t.Parallel() svc := tt.setup(t) - got, err := svc.ListResources(context.Background(), tt.filter) + got, err := svc.ListResources(context.Background(), tt.filter, false) if tt.wantErr != nil { assert.Error(t, err) assert.True(t, errors.Is(err, tt.wantErr)) diff --git a/core/resource/resource.go b/core/resource/resource.go index fc2b0a3a..32879d48 100644 --- a/core/resource/resource.go +++ b/core/resource/resource.go @@ -9,24 +9,29 @@ import ( "strings" "time" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" ) const urnSeparator = ":" var namingPattern = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_-]+$`) +var namingPatternStartingWithDigits = regexp.MustCompile(`^\d*[A-Za-z0-9_-]+$`) type Store interface { GetByURN(ctx context.Context, urn string) (*Resource, error) - List(ctx context.Context, filter Filter) ([]Resource, error) + List(ctx context.Context, filter Filter, withSpecConfigs bool) ([]Resource, error) Create(ctx context.Context, r Resource, hooks ...MutationHook) error Update(ctx context.Context, r Resource, saveRevision bool, reason string, hooks ...MutationHook) error Delete(ctx context.Context, urn string, hooks ...MutationHook) error Revisions(ctx context.Context, selector RevisionsSelector) ([]Revision, error) + + SyncOne(ctx context.Context, scope map[string][]string, syncFn SyncFn) error } +type SyncFn func(ctx context.Context, res Resource) (*Resource, error) + // MutationHook values are passed to mutation operations of resource storage // to handle any transactional requirements. 
type MutationHook func(ctx context.Context) error @@ -41,24 +46,34 @@ type Resource struct { Labels map[string]string `json:"labels"` CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` + UpdatedBy string `json:"updated_by"` + CreatedBy string `json:"created_by"` Spec Spec `json:"spec"` State State `json:"state"` } +type PagedResource struct { + Count int32 + Resources []Resource +} + type Spec struct { Configs json.RawMessage `json:"configs"` Dependencies map[string]string `json:"dependencies"` } type Filter struct { - Kind string `json:"kind"` - Project string `json:"project"` - Labels map[string]string `json:"labels"` + Kind string `json:"kind"` + Project string `json:"project"` + Labels map[string]string `json:"labels"` + PageSize int32 `json:"page_size"` + PageNum int32 `json:"page_num"` } type UpdateRequest struct { Spec Spec `json:"spec"` Labels map[string]string `json:"labels"` + UserID string } type RevisionsSelector struct { @@ -71,6 +86,7 @@ type Revision struct { Reason string `json:"reason"` Labels map[string]string `json:"labels"` CreatedAt time.Time `json:"created_at"` + CreatedBy string `json:"created_by"` Spec Spec `json:"spec"` } @@ -83,8 +99,8 @@ func (res *Resource) Validate(isCreate bool) error { if !namingPattern.MatchString(res.Kind) { return errors.ErrInvalid.WithMsgf("kind must match pattern '%s'", namingPattern) } - if !namingPattern.MatchString(res.Name) { - return errors.ErrInvalid.WithMsgf("name must match pattern '%s'", namingPattern) + if !namingPatternStartingWithDigits.MatchString(res.Name) { + return errors.ErrInvalid.WithMsgf("name must match pattern '%s'", namingPatternStartingWithDigits) } if !namingPattern.MatchString(res.Project) { return errors.ErrInvalid.WithMsgf("project must match pattern '%s'", namingPattern) @@ -95,7 +111,7 @@ func (res *Resource) Validate(isCreate bool) error { } if isCreate { - res.URN = generateURN(*res) + res.URN = GenerateURN(res.Kind, res.Project, res.Name) } return nil } @@ -126,7 +142,9 @@ func (f Filter) isMatch(r Resource) bool { return true } -func generateURN(res Resource) string { - parts := []string{"orn", "entropy", res.Kind, res.Project, res.Name} +// GenerateURN generates an Entropy URN address for the given combination. +// Note: Changing this will invalidate all existing resource identifiers. 
+func GenerateURN(kind, project, name string) string { + parts := []string{"orn", "entropy", kind, project, name} return strings.Join(parts, urnSeparator) } diff --git a/core/resource/resource_test.go b/core/resource/resource_test.go index f29fd367..520aef87 100644 --- a/core/resource/resource_test.go +++ b/core/resource/resource_test.go @@ -5,8 +5,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" ) func TestResource_Validate(t *testing.T) { @@ -44,13 +44,30 @@ func TestResource_Validate(t *testing.T) { }, want: errors.ErrInvalid, }, - + { + name: "ValidResourceWithNameStartingWithANumber", + res: resource.Resource{ + Kind: "fake", + Name: "12a1lpha", + Project: "goto", + }, + want: nil, + }, + { + name: "ValidResourceWithNameAsNumber", + res: resource.Resource{ + Kind: "fake", + Name: "112233", + Project: "goto", + }, + want: nil, + }, { name: "ValidResource", res: resource.Resource{ Kind: "fake", Name: "foo", - Project: "odpf", + Project: "goto", }, want: nil, }, diff --git a/core/resource/state.go b/core/resource/state.go index a661fd83..a435f81c 100644 --- a/core/resource/state.go +++ b/core/resource/state.go @@ -2,6 +2,7 @@ package resource import ( "encoding/json" + "time" ) const ( @@ -12,10 +13,18 @@ const ( StatusCompleted = "STATUS_COMPLETED" // terminal ) +type SyncResult struct { + Retries int `json:"retries"` + LastError string `json:"last_error"` +} + type State struct { Status string `json:"status"` Output json.RawMessage `json:"output"` ModuleData json.RawMessage `json:"module_data,omitempty"` + + NextSyncAt *time.Time `json:"next_sync_at,omitempty"` + SyncResult SyncResult `json:"sync_result"` } // IsTerminal returns true if state is terminal. 
A terminal state is diff --git a/core/resource/state_test.go b/core/resource/state_test.go index e245bf1b..112f477f 100644 --- a/core/resource/state_test.go +++ b/core/resource/state_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/odpf/entropy/core/resource" + "github.com/goto/entropy/core/resource" ) func TestState_IsTerminal(t *testing.T) { diff --git a/core/sync.go b/core/sync.go index b1f4ee27..51c52e91 100644 --- a/core/sync.go +++ b/core/sync.go @@ -2,111 +2,139 @@ package core import ( "context" - "encoding/json" "fmt" "time" - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/worker" -) + "go.opentelemetry.io/otel/metric" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" -const ( - JobKindSyncResource = "sync_resource" - JobKindScheduledSyncResource = "sched_sync_resource" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/telemetry" + "go.opentelemetry.io/otel/attribute" ) -type syncJobPayload struct { - ResourceURN string `json:"resource_urn"` - UpdatedAt time.Time `json:"updated_at"` -} - -func (s *Service) enqueueSyncJob(ctx context.Context, res resource.Resource, runAt time.Time, jobType string) error { - data := syncJobPayload{ - ResourceURN: res.URN, - UpdatedAt: res.UpdatedAt, - } - - payload, err := json.Marshal(data) - if err != nil { - return err - } +type SyncStatus string - job := worker.Job{ - ID: fmt.Sprintf(jobType+"-%s-%d", res.URN, runAt.Unix()), - Kind: jobType, - RunAt: runAt, - Payload: payload, - } +const ( + PendingCounter SyncStatus = "pending" + CompletedCounter SyncStatus = "completed" + ErrorCounter SyncStatus = "error" + RetryCounter SyncStatus = "retry" +) - if err := s.worker.Enqueue(ctx, job); err != nil && !errors.Is(err, worker.ErrJobExists) { - return err +// RunSyncer runs the syncer thread that keeps performing resource-sync at +// regular intervals. +func (svc *Service) RunSyncer(ctx context.Context, workerCount int, interval time.Duration, scope map[string][]string, eg *errgroup.Group) { + for i := 0; i < workerCount; i++ { + eg.Go(func() error { + tick := time.NewTimer(interval) + defer tick.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-tick.C: + tick.Reset(interval) + + err := svc.store.SyncOne(ctx, scope, svc.handleSync) + if err != nil { + zap.L().Warn("SyncOne() failed", zap.Error(err)) + } + } + } + }) } - return nil } -// HandleSyncJob is meant to be invoked by asyncWorker when an enqueued job is -// ready. -// TODO: make this private and move the registration of this handler inside New(). 
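Note on the syncer change above: RunSyncer replaces the queue-based HandleSyncJob flow with a fixed-interval polling loop per worker, each calling store.SyncOne with handleSync. As a rough sketch of how it might be started (the real wiring lives in the server start-up code outside this hunk, so the worker count, interval, and nil scope below are assumptions):

```go
// Illustrative sketch only: starting the poll-based syncer introduced above.
// Worker count, interval, and the nil scope are assumptions; the actual
// wiring lives in the server start-up code, outside this hunk.
package server

import (
	"context"
	"time"

	"golang.org/x/sync/errgroup"

	"github.com/goto/entropy/core"
)

func startSyncer(ctx context.Context, svc *core.Service) error {
	eg, ctx := errgroup.WithContext(ctx)

	// A nil scope is assumed to mean "no restriction"; project/kind filters
	// can be passed here to limit what this instance syncs.
	var scope map[string][]string

	// Two workers, each polling every 30 seconds (illustrative values).
	svc.RunSyncer(ctx, 2, 30*time.Second, scope, eg)

	return eg.Wait()
}
```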
-func (s *Service) HandleSyncJob(ctx context.Context, job worker.Job) ([]byte, error) { - const retryBackoff = 5 * time.Second +func (svc *Service) handleSync(ctx context.Context, res resource.Resource) (*resource.Resource, error) { + logEntry := zap.L().With( + zap.String("resource_urn", res.URN), + zap.String("resource_status", res.State.Status), + zap.Int("retries", res.State.SyncResult.Retries), + zap.String("last_err", res.State.SyncResult.LastError), + ) - var data syncJobPayload - if err := json.Unmarshal(job.Payload, &data); err != nil { + meter := telemetry.GetMeter(svc.serviceName) + retryCounter, err := setupCounter(meter, RetryCounter) + if err != nil { return nil, err } - - syncedRes, err := s.syncChange(ctx, data.ResourceURN) + errorCounter, err := setupCounter(meter, ErrorCounter) if err != nil { - if errors.Is(err, errors.ErrInternal) { - return nil, &worker.RetryableError{ - Cause: errors.Verbose(err), - RetryAfter: retryBackoff, - } - } - - return nil, errors.Verbose(err) + return nil, err } - - return json.Marshal(map[string]interface{}{ - "status": syncedRes.State.Status, - }) -} - -func (s *Service) syncChange(ctx context.Context, urn string) (*resource.Resource, error) { - res, err := s.GetResource(ctx, urn) + completedCounter, err := setupCounter(meter, CompletedCounter) if err != nil { return nil, err } - modSpec, err := s.generateModuleSpec(ctx, *res) + modSpec, err := svc.generateModuleSpec(ctx, res) if err != nil { + logEntry.Error("SyncOne() failed", zap.Error(err)) return nil, err } - oldState := res.State.Clone() - newState, err := s.moduleSvc.SyncState(ctx, *modSpec) + newState, err := svc.moduleSvc.SyncState(ctx, *modSpec) if err != nil { + logEntry.Error("SyncOne() failed", zap.Error(err)) + + res.State.SyncResult.LastError = err.Error() + res.State.SyncResult.Retries++ + + // Increment the retry counter. + logEntry.Info("Incrementing retry counter") + retryCounter.Add(context.Background(), 1, metric.WithAttributes(attribute.String("resource", res.URN))) + if errors.Is(err, errors.ErrInvalid) { - return nil, err - } - return nil, errors.ErrInternal.WithMsgf("sync() failed").WithCausef(err.Error()) - } - res.UpdatedAt = s.clock() - res.State = *newState - - // TODO: clarify on behaviour when resource schedule for deletion reaches error. - shouldDelete := oldState.InDeletion() && newState.IsTerminal() - if shouldDelete { - if err := s.DeleteResource(ctx, urn); err != nil { - return nil, err + // ErrInvalid is expected to be returned when config is invalid. + // There is no point in retrying in this case. + res.State.Status = resource.StatusError + res.State.NextSyncAt = nil + + // Increment the error counter. + logEntry.Info("Incrementing error counter") + errorCounter.Add(context.Background(), 1, metric.WithAttributes(attribute.String("resource", res.URN))) + } else if svc.maxSyncRetries > 0 && res.State.SyncResult.Retries >= svc.maxSyncRetries { + // Some other error occurred and no more retries remaining. + // move the resource to failure state. + res.State.Status = resource.StatusError + res.State.NextSyncAt = nil + + // Increment the error counter. + logEntry.Info("Incrementing error counter") + errorCounter.Add(context.Background(), 1, metric.WithAttributes(attribute.String("resource", res.URN))) + } else { + // Some other error occurred and we still have remaining retries. + // need to backoff and retry in some time. 
+ tryAgainAt := svc.clock().Add(svc.syncBackoff) + res.State.NextSyncAt = &tryAgainAt } } else { - if err := s.upsert(ctx, module.Plan{Resource: *res}, false, false, ""); err != nil { - return nil, err - } + res.State.SyncResult.Retries = 0 + res.State.SyncResult.LastError = "" + res.UpdatedAt = svc.clock() + res.State = *newState + + // Increment the completed counter. + logEntry.Info("Incrementing completed counter") + completedCounter.Add(context.Background(), 1, metric.WithAttributes(attribute.String("resource", res.URN))) + + logEntry.Info("SyncOne() finished", + zap.String("final_status", res.State.Status), + zap.Timep("next_sync", res.State.NextSyncAt), + ) } - return res, nil + return &res, nil +} + +func setupCounter(meter metric.Meter, countername SyncStatus) (metric.Int64Counter, error) { + return meter.Int64Counter( + fmt.Sprintf("%s_counter", countername), + metric.WithDescription(fmt.Sprintf("Total number of %s performed", countername)), + metric.WithUnit("1"), + ) } diff --git a/core/write.go b/core/write.go index 7ed57253..aa28e5fb 100644 --- a/core/write.go +++ b/core/write.go @@ -2,13 +2,26 @@ package core import ( "context" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" + "fmt" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/telemetry" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.uber.org/zap" ) -func (s *Service) CreateResource(ctx context.Context, res resource.Resource) (*resource.Resource, error) { +type Options struct { + DryRun bool +} + +func WithDryRun(dryRun bool) Options { + return Options{DryRun: dryRun} +} + +func (svc *Service) CreateResource(ctx context.Context, res resource.Resource, resourceOpts ...Options) (*resource.Resource, error) { if err := res.Validate(true); err != nil { return nil, err } @@ -16,35 +29,43 @@ func (s *Service) CreateResource(ctx context.Context, res resource.Resource) (*r act := module.ActionRequest{ Name: module.CreateAction, Params: res.Spec.Configs, + Labels: res.Labels, + UserID: res.CreatedBy, } res.Spec.Configs = nil - return s.execAction(ctx, res, act) + dryRun := false + for _, opt := range resourceOpts { + dryRun = opt.DryRun + } + + return svc.execAction(ctx, res, act, dryRun) } -func (s *Service) UpdateResource(ctx context.Context, urn string, req resource.UpdateRequest) (*resource.Resource, error) { +func (svc *Service) UpdateResource(ctx context.Context, urn string, req resource.UpdateRequest, resourceOpts ...Options) (*resource.Resource, error) { if len(req.Spec.Dependencies) != 0 { return nil, errors.ErrUnsupported.WithMsgf("updating dependencies is not supported") } else if len(req.Spec.Configs) == 0 { return nil, errors.ErrInvalid.WithMsgf("no config is being updated, nothing to do") } - return s.ApplyAction(ctx, urn, module.ActionRequest{ + return svc.ApplyAction(ctx, urn, module.ActionRequest{ Name: module.UpdateAction, Params: req.Spec.Configs, Labels: req.Labels, - }) + UserID: req.UserID, + }, resourceOpts...) 
} -func (s *Service) DeleteResource(ctx context.Context, urn string) error { - _, actionErr := s.ApplyAction(ctx, urn, module.ActionRequest{ +func (svc *Service) DeleteResource(ctx context.Context, urn string) error { + _, actionErr := svc.ApplyAction(ctx, urn, module.ActionRequest{ Name: module.DeleteAction, - }) + }, WithDryRun(false)) return actionErr } -func (s *Service) ApplyAction(ctx context.Context, urn string, act module.ActionRequest) (*resource.Resource, error) { - res, err := s.GetResource(ctx, urn) +func (svc *Service) ApplyAction(ctx context.Context, urn string, act module.ActionRequest, resourceOpts ...Options) (*resource.Resource, error) { + res, err := svc.GetResource(ctx, urn) if err != nil { return nil, err } else if !res.State.IsTerminal() { @@ -52,40 +73,66 @@ func (s *Service) ApplyAction(ctx context.Context, urn string, act module.Action WithMsgf("cannot perform '%s' on resource in '%s'", act.Name, res.State.Status) } - return s.execAction(ctx, *res, act) + dryRun := false + for _, opt := range resourceOpts { + dryRun = opt.DryRun + } + + return svc.execAction(ctx, *res, act, dryRun) } -func (s *Service) execAction(ctx context.Context, res resource.Resource, act module.ActionRequest) (*resource.Resource, error) { - planned, err := s.planChange(ctx, res, act) +func (svc *Service) execAction(ctx context.Context, res resource.Resource, act module.ActionRequest, dryRun bool) (*resource.Resource, error) { + logEntry := zap.L().With( + zap.String("resource_urn", res.URN), + zap.String("resource_status", res.State.Status), + zap.Int("retries", res.State.SyncResult.Retries), + zap.String("last_err", res.State.SyncResult.LastError), + ) + + planned, err := svc.planChange(ctx, res, act) if err != nil { return nil, err } if isCreate(act.Name) { - planned.Resource.CreatedAt = s.clock() - planned.Resource.UpdatedAt = planned.Resource.CreatedAt + planned.CreatedAt = svc.clock() + planned.UpdatedAt = planned.CreatedAt + planned.CreatedBy = act.UserID + planned.UpdatedBy = act.UserID } else { - planned.Resource.CreatedAt = res.CreatedAt - planned.Resource.UpdatedAt = s.clock() + planned.CreatedAt = res.CreatedAt + planned.UpdatedAt = svc.clock() + planned.UpdatedBy = act.UserID } - if err := s.upsert(ctx, *planned, isCreate(act.Name), true, planned.Reason); err != nil { + reason := fmt.Sprintf("action:%s", act.Name) + + if !dryRun { + if err := svc.upsert(ctx, *planned, isCreate(act.Name), true, reason); err != nil { + return nil, err + } + } + + meter := telemetry.GetMeter(svc.serviceName) + pendingCounter, err := setupCounter(meter, PendingCounter) + if err != nil { return nil, err } - return &planned.Resource, nil -} -func isCreate(actionName string) bool { - return actionName == module.CreateAction + // Increment the pending counter. 
+ logEntry.Info("Incrementing pending counter") + pendingCounter.Add(context.Background(), 1, metric.WithAttributes(attribute.String("resource", res.URN))) + + return planned, nil } -func (s *Service) planChange(ctx context.Context, res resource.Resource, act module.ActionRequest) (*module.Plan, error) { - modSpec, err := s.generateModuleSpec(ctx, res) +func (svc *Service) planChange(ctx context.Context, res resource.Resource, act module.ActionRequest) (*resource.Resource, error) { + modSpec, err := svc.generateModuleSpec(ctx, res) if err != nil { return nil, err } - planned, err := s.moduleSvc.PlanAction(ctx, *modSpec, act) + planned, err := svc.moduleSvc.PlanAction(ctx, *modSpec, act) if err != nil { if errors.Is(err, errors.ErrInvalid) { return nil, err @@ -93,46 +140,54 @@ func (s *Service) planChange(ctx context.Context, res resource.Resource, act mod return nil, errors.ErrInternal.WithMsgf("plan() failed").WithCausef(err.Error()) } - planned.Resource.Labels = act.Labels - if err := planned.Resource.Validate(isCreate(act.Name)); err != nil { + planned.Labels = mergeLabels(res.Labels, act.Labels) + if err := planned.Validate(isCreate(act.Name)); err != nil { return nil, err } - return planned, nil } -func (s *Service) upsert(ctx context.Context, plan module.Plan, isCreate bool, saveRevision bool, reason string) error { - var hooks []resource.MutationHook - hooks = append(hooks, func(ctx context.Context) error { - if plan.Resource.State.IsTerminal() { - // no need to enqueue if resource has reached terminal state. - return nil - } - - return s.enqueueSyncJob(ctx, plan.Resource, s.clock(), JobKindSyncResource) - }) - - if !plan.ScheduleRunAt.IsZero() { - hooks = append(hooks, func(ctx context.Context) error { - return s.enqueueSyncJob(ctx, plan.Resource, plan.ScheduleRunAt, JobKindScheduledSyncResource) - }) - } - +func (svc *Service) upsert(ctx context.Context, res resource.Resource, isCreate bool, saveRevision bool, reason string) error { var err error if isCreate { - err = s.store.Create(ctx, plan.Resource, hooks...) + err = svc.store.Create(ctx, res) } else { - err = s.store.Update(ctx, plan.Resource, saveRevision, reason, hooks...) + err = svc.store.Update(ctx, res, saveRevision, reason) } if err != nil { if isCreate && errors.Is(err, errors.ErrConflict) { - return errors.ErrConflict.WithMsgf("resource with urn '%s' already exists", plan.Resource.URN) + return errors.ErrConflict.WithMsgf("resource with urn '%s' already exists", res.URN) } else if !isCreate && errors.Is(err, errors.ErrNotFound) { - return errors.ErrNotFound.WithMsgf("resource with urn '%s' does not exist", plan.Resource.URN) + return errors.ErrNotFound.WithMsgf("resource with urn '%s' does not exist", res.URN) } return errors.ErrInternal.WithCausef(err.Error()) } return nil } + +func isCreate(actionName string) bool { + return actionName == module.CreateAction +} + +func mergeLabels(m1, m2 map[string]string) map[string]string { + if m1 == nil && m2 == nil { + return nil + } + res := map[string]string{} + for k, v := range m1 { + res[k] = v + } + for k, v := range m2 { + res[k] = v + } + + // clean the empty values. 
+ for k, v := range res { + if v == "" { + delete(res, k) + } + } + return res +} diff --git a/core/write_test.go b/core/write_test.go index 5ab387cf..fd419bb4 100644 --- a/core/write_test.go +++ b/core/write_test.go @@ -8,12 +8,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "github.com/odpf/entropy/core" - "github.com/odpf/entropy/core/mocks" - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/worker" + "github.com/goto/entropy/core" + "github.com/goto/entropy/core/mocks" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/worker" ) func TestService_CreateResource(t *testing.T) { @@ -26,6 +26,7 @@ func TestService_CreateResource(t *testing.T) { setup func(t *testing.T) *core.Service res resource.Resource want *resource.Resource + options []core.Options wantErr error }{ { @@ -37,7 +38,7 @@ func TestService_CreateResource(t *testing.T) { PlanAction(mock.Anything, mock.Anything, mock.Anything). Return(nil, errSample).Once() - return core.New(nil, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(nil, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, res: resource.Resource{ Kind: "mock", @@ -59,7 +60,7 @@ func TestService_CreateResource(t *testing.T) { Return(nil, errors.ErrNotFound). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, res: resource.Resource{ Kind: "mock", @@ -98,7 +99,7 @@ func TestService_CreateResource(t *testing.T) { }, nil). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, res: resource.Resource{ Kind: "mock", @@ -136,7 +137,7 @@ func TestService_CreateResource(t *testing.T) { }, nil). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, res: resource.Resource{ Kind: "mock", @@ -158,12 +159,10 @@ func TestService_CreateResource(t *testing.T) { mod := &mocks.ModuleService{} mod.EXPECT(). PlanAction(mock.Anything, mock.Anything, mock.Anything). - Return(&module.Plan{ - Resource: resource.Resource{ - Kind: "mock", - Name: "child", - Project: "project", - }, + Return(&resource.Resource{ + Kind: "mock", + Name: "child", + Project: "project", }, nil).Once() resourceRepo := &mocks.ResourceStore{} @@ -172,7 +171,7 @@ func TestService_CreateResource(t *testing.T) { Return(errSample). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, res: resource.Resource{ Kind: "mock", @@ -189,12 +188,10 @@ func TestService_CreateResource(t *testing.T) { mod := &mocks.ModuleService{} mod.EXPECT(). PlanAction(mock.Anything, mock.Anything, mock.Anything). 
- Return(&module.Plan{ - Resource: resource.Resource{ - Kind: "mock", - Name: "child", - Project: "project", - }, + Return(&resource.Resource{ + Kind: "mock", + Name: "child", + Project: "project", }, nil).Once() resourceRepo := &mocks.ResourceStore{} @@ -202,7 +199,7 @@ func TestService_CreateResource(t *testing.T) { Create(mock.Anything, mock.Anything, mock.Anything). Return(errors.ErrConflict).Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, res: resource.Resource{ Kind: "mock", @@ -219,13 +216,11 @@ func TestService_CreateResource(t *testing.T) { mod := &mocks.ModuleService{} mod.EXPECT(). PlanAction(mock.Anything, mock.Anything, mock.Anything). - Return(&module.Plan{ - Resource: resource.Resource{ - Kind: "mock", - Name: "child", - Project: "project", - State: resource.State{Status: resource.StatusCompleted}, - }, + Return(&resource.Resource{ + Kind: "mock", + Name: "child", + Project: "project", + State: resource.State{Status: resource.StatusCompleted}, }, nil).Once() mod.EXPECT(). GetOutput(mock.Anything, mock.Anything). @@ -247,8 +242,7 @@ func TestService_CreateResource(t *testing.T) { resourceRepo.EXPECT(). Create(mock.Anything, mock.Anything, mock.Anything). Run(func(ctx context.Context, r resource.Resource, hooks ...resource.MutationHook) { - assert.Len(t, hooks, 1) - assert.NoError(t, hooks[0](ctx)) + assert.Len(t, hooks, 0) }). Return(nil). Once() @@ -262,7 +256,7 @@ func TestService_CreateResource(t *testing.T) { }). Return(nil) - return core.New(resourceRepo, mod, mockWorker, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, res: resource.Resource{ Kind: "mock", @@ -285,6 +279,41 @@ func TestService_CreateResource(t *testing.T) { }, wantErr: nil, }, + { + name: "AlreadyExistsWithDryRun", + setup: func(t *testing.T) *core.Service { + t.Helper() + mod := &mocks.ModuleService{} + mod.EXPECT(). + PlanAction(mock.Anything, mock.Anything, mock.Anything). + Return(&resource.Resource{ + Kind: "mock", + Name: "child", + Project: "project", + State: resource.State{Status: resource.StatusCompleted}, + }, nil).Once() + + resourceRepo := &mocks.ResourceStore{} + + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) + }, + res: resource.Resource{ + Kind: "mock", + Name: "child", + Project: "project", + }, + want: &resource.Resource{ + URN: "orn:entropy:mock:project:child", + Kind: "mock", + Name: "child", + Project: "project", + State: resource.State{Status: resource.StatusCompleted}, + CreatedAt: frozenTime, + UpdatedAt: frozenTime, + }, + options: []core.Options{core.WithDryRun(true)}, + wantErr: nil, + }, } for _, tt := range tests { @@ -293,7 +322,7 @@ func TestService_CreateResource(t *testing.T) { t.Parallel() svc := tt.setup(t) - got, err := svc.CreateResource(context.Background(), tt.res) + got, err := svc.CreateResource(context.Background(), tt.res, tt.options...) 
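Note on the dry-run tests above: the new variadic core.Options parameter lets callers plan a change without writing anything to the store. A minimal sketch of how a caller might use it (how svc and res are obtained is assumed and not shown):

```go
// Illustrative sketch: planning a resource creation without persisting it,
// using the WithDryRun option added in this change. How svc and res are
// obtained is assumed and not shown.
package example

import (
	"context"
	"fmt"

	"github.com/goto/entropy/core"
	"github.com/goto/entropy/core/resource"
)

func planOnly(ctx context.Context, svc *core.Service, res resource.Resource) error {
	// With dry-run enabled, execAction still plans and validates the change
	// but skips the store upsert, so nothing is written.
	planned, err := svc.CreateResource(ctx, res, core.WithDryRun(true))
	if err != nil {
		return err
	}

	fmt.Printf("planned %s (status %s)\n", planned.URN, planned.State.Status)
	return nil
}
```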
if tt.wantErr != nil { assert.Error(t, err) assert.True(t, errors.Is(err, tt.wantErr)) @@ -317,12 +346,25 @@ func TestService_UpdateResource(t *testing.T) { CreatedAt: frozenTime, } + testResourceForDryRun := resource.Resource{ + URN: "orn:entropy:mock:project:childtwo", + Kind: "mock", + Name: "childtwo", + Project: "project", + State: resource.State{Status: resource.StatusCompleted}, + Spec: resource.Spec{ + Configs: []byte(`{"foo": "bar-old"}`), + }, + CreatedAt: frozenTime, + } + tests := []struct { name string setup func(t *testing.T) *core.Service urn string update resource.UpdateRequest want *resource.Resource + options []core.Options wantErr error }{ { @@ -335,7 +377,7 @@ func TestService_UpdateResource(t *testing.T) { Return(nil, errors.ErrNotFound). Once() - return core.New(resourceRepo, nil, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, nil, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:project:child", update: resource.UpdateRequest{ @@ -364,7 +406,7 @@ func TestService_UpdateResource(t *testing.T) { Return(&testResource, nil). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:project:child", update: resource.UpdateRequest{ @@ -381,7 +423,7 @@ func TestService_UpdateResource(t *testing.T) { mod := &mocks.ModuleService{} mod.EXPECT(). PlanAction(mock.Anything, mock.Anything, mock.Anything). - Return(&module.Plan{Resource: testResource}, nil).Once() + Return(&testResource, nil).Once() mod.EXPECT(). GetOutput(mock.Anything, mock.Anything). Return(nil, nil). @@ -396,8 +438,7 @@ func TestService_UpdateResource(t *testing.T) { resourceRepo.EXPECT(). Update(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Run(func(ctx context.Context, r resource.Resource, saveRevision bool, reason string, hooks ...resource.MutationHook) { - assert.Len(t, hooks, 1) - assert.NoError(t, hooks[0](ctx)) + assert.Len(t, hooks, 0) }). Return(testErr) @@ -412,7 +453,7 @@ func TestService_UpdateResource(t *testing.T) { Return(nil). Once() - return core.New(resourceRepo, mod, mockWorker, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:project:child", update: resource.UpdateRequest{ @@ -429,18 +470,16 @@ func TestService_UpdateResource(t *testing.T) { mod := &mocks.ModuleService{} mod.EXPECT(). PlanAction(mock.Anything, mock.Anything, mock.Anything). - Return(&module.Plan{ - Resource: resource.Resource{ - URN: "orn:entropy:mock:project:child", - Kind: "mock", - Name: "child", - Project: "project", - Spec: resource.Spec{ - Configs: []byte(`{"foo": "bar"}`), - }, - State: resource.State{Status: resource.StatusPending}, - CreatedAt: frozenTime, + Return(&resource.Resource{ + URN: "orn:entropy:mock:project:child", + Kind: "mock", + Name: "child", + Project: "project", + Spec: resource.Spec{ + Configs: []byte(`{"foo": "bar"}`), }, + State: resource.State{Status: resource.StatusPending}, + CreatedAt: frozenTime, }, nil).Once() mod.EXPECT(). GetOutput(mock.Anything, mock.Anything). @@ -456,23 +495,11 @@ func TestService_UpdateResource(t *testing.T) { Update(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return(nil). 
Run(func(ctx context.Context, r resource.Resource, saveRevision bool, reason string, hooks ...resource.MutationHook) { - assert.Len(t, hooks, 1) - assert.NoError(t, hooks[0](ctx)) + assert.Len(t, hooks, 0) }). Twice() - mockWorker := &mocks.AsyncWorker{} - mockWorker.EXPECT(). - Enqueue(mock.Anything, mock.Anything). - Return(nil). - Run(func(ctx context.Context, jobs ...worker.Job) { - assert.Len(t, jobs, 1) - assert.Equal(t, jobs[0].ID, "sync_resource-orn:entropy:mock:project:child-1650536955") - assert.Equal(t, jobs[0].Kind, "sync_resource") - }). - Once() - - return core.New(resourceRepo, mod, mockWorker, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:project:child", update: resource.UpdateRequest{ @@ -494,6 +521,57 @@ func TestService_UpdateResource(t *testing.T) { }, wantErr: nil, }, + { + name: "SuccessWithDryRun", + setup: func(t *testing.T) *core.Service { + t.Helper() + mod := &mocks.ModuleService{} + mod.EXPECT(). + PlanAction(mock.Anything, mock.Anything, mock.Anything). + Return(&resource.Resource{ + URN: "orn:entropy:mock:project:childtwo", + Kind: "mock", + Name: "childtwo", + Project: "project", + Spec: resource.Spec{ + Configs: []byte(`{"foo": "bar"}`), + }, + State: resource.State{Status: resource.StatusPending}, + CreatedAt: frozenTime, + }, nil).Once() + mod.EXPECT(). + GetOutput(mock.Anything, mock.Anything). + Return(nil, nil). + Once() + + resourceRepo := &mocks.ResourceStore{} + resourceRepo.EXPECT(). + GetByURN(mock.Anything, "orn:entropy:mock:project:childtwo"). + Return(&testResourceForDryRun, nil).Once() + + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) + }, + urn: "orn:entropy:mock:project:childtwo", + update: resource.UpdateRequest{ + Spec: resource.Spec{Configs: []byte(`{"foo": "bar"}`)}, + Labels: map[string]string{"created_by": "test_user", "group": "test_group"}, + }, + want: &resource.Resource{ + URN: "orn:entropy:mock:project:childtwo", + Kind: "mock", + Name: "childtwo", + Project: "project", + CreatedAt: frozenTime, + UpdatedAt: frozenTime, + State: resource.State{Status: resource.StatusPending}, + Labels: map[string]string{"created_by": "test_user", "group": "test_group"}, + Spec: resource.Spec{ + Configs: []byte(`{"foo": "bar"}`), + }, + }, + options: []core.Options{core.WithDryRun(true)}, + wantErr: nil, + }, } for _, tt := range tests { @@ -502,7 +580,7 @@ func TestService_UpdateResource(t *testing.T) { t.Parallel() svc := tt.setup(t) - got, err := svc.UpdateResource(context.Background(), tt.urn, tt.update) + got, err := svc.UpdateResource(context.Background(), tt.urn, tt.update, tt.options...) if tt.wantErr != nil { assert.Error(t, err) assert.True(t, errors.Is(err, tt.wantErr)) @@ -535,7 +613,7 @@ func TestService_DeleteResource(t *testing.T) { Return(nil, testErr). Once() - return core.New(resourceRepo, nil, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, nil, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:foo:bar", wantErr: testErr, @@ -547,16 +625,14 @@ func TestService_DeleteResource(t *testing.T) { mod := &mocks.ModuleService{} mod.EXPECT(). PlanAction(mock.Anything, mock.Anything, mock.Anything). 
- Return(&module.Plan{ - Resource: resource.Resource{ - URN: "orn:entropy:mock:project:child", - Kind: "mock", - Name: "child", - Project: "project", - State: resource.State{Status: resource.StatusPending}, - CreatedAt: frozenTime, - UpdatedAt: frozenTime, - }, + Return(&resource.Resource{ + URN: "orn:entropy:mock:project:child", + Kind: "mock", + Name: "child", + Project: "project", + State: resource.State{Status: resource.StatusPending}, + CreatedAt: frozenTime, + UpdatedAt: frozenTime, }, nil).Once() mod.EXPECT(). GetOutput(mock.Anything, mock.Anything). @@ -582,7 +658,7 @@ func TestService_DeleteResource(t *testing.T) { Return(testErr). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:foo:bar", wantErr: errors.ErrInternal, @@ -594,16 +670,14 @@ func TestService_DeleteResource(t *testing.T) { mod := &mocks.ModuleService{} mod.EXPECT(). PlanAction(mock.Anything, mock.Anything, mock.Anything). - Return(&module.Plan{ - Resource: resource.Resource{ - URN: "orn:entropy:mock:project:child", - Kind: "mock", - Name: "child", - Project: "project", - State: resource.State{Status: resource.StatusPending}, - CreatedAt: frozenTime, - UpdatedAt: frozenTime, - }, + Return(&resource.Resource{ + URN: "orn:entropy:mock:project:child", + Kind: "mock", + Name: "child", + Project: "project", + State: resource.State{Status: resource.StatusPending}, + CreatedAt: frozenTime, + UpdatedAt: frozenTime, }, nil).Once() mod.EXPECT(). GetOutput(mock.Anything, mock.Anything). @@ -629,7 +703,7 @@ func TestService_DeleteResource(t *testing.T) { Return(nil). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:foo:bar", wantErr: nil, @@ -667,6 +741,7 @@ func TestService_ApplyAction(t *testing.T) { urn string action module.ActionRequest want *resource.Resource + options []core.Options wantErr error }{ { @@ -679,7 +754,7 @@ func TestService_ApplyAction(t *testing.T) { Return(nil, errors.ErrNotFound). Once() - return core.New(resourceRepo, nil, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, nil, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:foo:bar", action: sampleAction, @@ -706,7 +781,7 @@ func TestService_ApplyAction(t *testing.T) { }, nil). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:foo:bar", action: sampleAction, @@ -739,7 +814,7 @@ func TestService_ApplyAction(t *testing.T) { }, nil). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:foo:bar", action: sampleAction, @@ -753,14 +828,12 @@ func TestService_ApplyAction(t *testing.T) { mod := &mocks.ModuleService{} mod.EXPECT(). PlanAction(mock.Anything, mock.Anything, sampleAction). 
- Return(&module.Plan{ - Resource: resource.Resource{ - URN: "orn:entropy:mock:foo:bar", - Kind: "mock", - Project: "foo", - Name: "bar", - State: resource.State{Status: resource.StatusPending}, - }, + Return(&resource.Resource{ + URN: "orn:entropy:mock:foo:bar", + Kind: "mock", + Project: "foo", + Name: "bar", + State: resource.State{Status: resource.StatusPending}, }, nil).Once() mod.EXPECT(). GetOutput(mock.Anything, mock.Anything). @@ -784,7 +857,54 @@ func TestService_ApplyAction(t *testing.T) { Return(nil). Once() - return core.New(resourceRepo, mod, &mocks.AsyncWorker{}, deadClock, nil) + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) + }, + urn: "orn:entropy:mock:foo:bar", + action: sampleAction, + want: &resource.Resource{ + URN: "orn:entropy:mock:foo:bar", + Kind: "mock", + Project: "foo", + Name: "bar", + State: resource.State{Status: resource.StatusPending}, + CreatedAt: frozenTime, + UpdatedAt: frozenTime, + }, + wantErr: nil, + }, + { + name: "SuccessWithDryRun", + setup: func(t *testing.T) *core.Service { + t.Helper() + mod := &mocks.ModuleService{} + mod.EXPECT(). + PlanAction(mock.Anything, mock.Anything, sampleAction). + Return(&resource.Resource{ + URN: "orn:entropy:mock:foo:bar", + Kind: "mock", + Project: "foo", + Name: "bar", + State: resource.State{Status: resource.StatusPending}, + }, nil).Once() + mod.EXPECT(). + GetOutput(mock.Anything, mock.Anything). + Return(nil, nil). + Once() + + resourceRepo := &mocks.ResourceStore{} + resourceRepo.EXPECT(). + GetByURN(mock.Anything, "orn:entropy:mock:foo:bar"). + Return(&resource.Resource{ + URN: "orn:entropy:mock:foo:bar", + Kind: "mock", + Project: "foo", + Name: "bar", + CreatedAt: frozenTime, + State: resource.State{Status: resource.StatusCompleted}, + }, nil). + Once() + + return core.New(resourceRepo, mod, deadClock, defaultSyncBackoff, defaultMaxRetries, serviceName) }, urn: "orn:entropy:mock:foo:bar", action: sampleAction, @@ -798,6 +918,7 @@ func TestService_ApplyAction(t *testing.T) { UpdatedAt: frozenTime, }, wantErr: nil, + options: []core.Options{core.WithDryRun(true)}, }, } @@ -807,7 +928,7 @@ func TestService_ApplyAction(t *testing.T) { t.Parallel() svc := tt.setup(t) - got, err := svc.ApplyAction(context.Background(), tt.urn, tt.action) + got, err := svc.ApplyAction(context.Background(), tt.urn, tt.action, tt.options...) if tt.wantErr != nil { assert.Error(t, err) assert.True(t, errors.Is(err, tt.wantErr), cmp.Diff(tt.want, err)) diff --git a/docker-compose.yaml b/docker-compose.yaml index 6555df65..7ec4ec68 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -5,13 +5,12 @@ services: image: "postgres:14-alpine" environment: POSTGRES_HOST_AUTH_METHOD: trust + PUID: 1000 + PGID: 1000 ports: - "5432:5432" volumes: - pgdata:/var/lib/postgresql/data - environment: - - PUID=1000 - - PGID=1000 restart: unless-stopped container_name: pg14 diff --git a/docs/concepts/resource-life-cycle.md b/docs/concepts/resource-life-cycle.md index 3f19bb20..656ff732 100644 --- a/docs/concepts/resource-life-cycle.md +++ b/docs/concepts/resource-life-cycle.md @@ -37,7 +37,7 @@ type Output map[string]interface{} The `Resource` definition is self explanatory. It has the `Spec` field which holds the `Configs` and `Dependencies` of a resource. 
The `State` field has three parts, `Status` holds the current status of a resource, `Output` holds the outcome of the latest action performed, while `Data` holds the transitory information which might be used to perform actions on the resource. -For instance, a [firehose](https://github.com/odpf/firehose) resource looks like: +For instance, a [firehose](https://github.com/goto/firehose) resource looks like: ``` { diff --git a/docs/installation.md b/docs/installation.md index 16c1acc4..e44c7e8e 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -9,7 +9,7 @@ Entropy installation is simple. You can install Entropy on macOS, Windows, Linux #### Binary (Cross-platform) -Download the appropriate version for your platform from [releases](https://github.com/odpf/entropy/releases) page. Once +Download the appropriate version for your platform from [releases](https://github.com/goto/entropy/releases) page. Once downloaded, the binary can be run from anywhere. You don’t need to install it into a global location. This works well for shared hosts and other systems where you don’t have a privileged account. Ideally, you should install it somewhere in your PATH for easy use. `/usr/local/bin` is the most probable location. @@ -18,7 +18,7 @@ in your PATH for easy use. `/usr/local/bin` is the most probable location. ```sh # Install entropy (requires homebrew installed) -$ brew install odpf/taps/entropy +$ brew install goto/taps/entropy # Upgrade entropy (requires homebrew installed) $ brew upgrade entropy @@ -33,7 +33,7 @@ To compile from source, you will need [Go](https://golang.org/) installed in you ```bash # Clone the repo -$ https://github.com/odpf/entropy.git +$ https://github.com/goto/entropy.git # Build entropy binary file $ make build @@ -44,12 +44,12 @@ $ ./entropy version ### Using Docker image -Entropy ships a Docker image [odpf/entropy](https://hub.docker.com/r/odpf/entropy) that enables you to use `entropy` as part of your Docker workflow. +Entropy ships a Docker image [goto/entropy](https://hub.docker.com/r/goto/entropy) that enables you to use `entropy` as part of your Docker workflow. For example, you can run `entropy version` with this command: ```bash -$ docker run odpf/entropy version +$ docker run goto/entropy version ``` ### Verifying the installation diff --git a/docs/modules/firehose.md b/docs/modules/firehose.md index 4e981e8a..b870e4df 100644 --- a/docs/modules/firehose.md +++ b/docs/modules/firehose.md @@ -1,6 +1,6 @@ # Firehose -[Firehose](https://odpf.github.io/firehose/) is an extensible, no-code, and cloud-native service to load real-time streaming data from Kafka to data stores, data lakes, and analytical storage systems. +[Firehose](https://goto.github.io/firehose/) is an extensible, no-code, and cloud-native service to load real-time streaming data from Kafka to data stores, data lakes, and analytical storage systems. ## What happens in Plan? @@ -34,4 +34,4 @@ type moduleConfig struct { | `ChartVersion` | `string` Chart version you want to use. | | `Firehose` | `struct` Holds firehose configuration. | -Detailed JSONSchema for config can be referenced [here](https://github.com/odpf/entropy/blob/main/modules/firehose/schema/config.json). \ No newline at end of file +Detailed JSONSchema for config can be referenced [here](https://github.com/goto/entropy/blob/main/modules/firehose/schema/config.json).
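Note on the firehose module doc above: to tie it back to the resource model changed earlier in this diff, a resource for this module could be built roughly as follows; the keys inside Configs are illustrative assumptions only, and the linked JSON schema remains the source of truth:

```go
// Illustrative sketch: a firehose resource as the module doc above describes.
// The keys inside Configs are assumptions; modules/firehose/schema/config.json
// is the source of truth for the module's config shape.
package example

import (
	"encoding/json"

	"github.com/goto/entropy/core/resource"
)

func sampleFirehoseResource() resource.Resource {
	// Hypothetical config payload mirroring the ChartVersion/Firehose fields
	// listed in the table above; real key names may differ.
	configs := json.RawMessage(`{"chart_version": "0.1.0", "firehose": {"replicas": 1}}`)

	return resource.Resource{
		Kind:    "firehose",
		Name:    "orders-log-sink",
		Project: "demo",
		Labels:  map[string]string{"team": "streaming"},
		Spec:    resource.Spec{Configs: configs},
	}
}
```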
\ No newline at end of file diff --git a/docs/modules/kubernetes.md b/docs/modules/kubernetes.md index 54fee8b7..8df4cbd6 100644 --- a/docs/modules/kubernetes.md +++ b/docs/modules/kubernetes.md @@ -43,7 +43,7 @@ type Config struct { | `ClientCertificate` | `string` PEM-encoded client certificate for TLS authentication. | Note: User shall either enable Insecure or set ClusterCACertificate. Also, user can either use Token to aunthenate a service account or they can use ClientKey & ClientCertificate for TLS authentication. -Detailed JSONSchema for config can be referenced [here](https://github.com/odpf/entropy/blob/main/modules/kubernetes/config_schema.json). +Detailed JSONSchema for config can be referenced [here](https://github.com/goto/entropy/blob/main/modules/kubernetes/config_schema.json). ## Supported actions diff --git a/entropy.yaml b/entropy.yaml index 1f683eda..1a86db93 100644 --- a/entropy.yaml +++ b/entropy.yaml @@ -33,7 +33,7 @@ worker: # Interval between successive polls by a single worker thread. be careful when # reducing this since it can cause contention when combined high threads value # and lot of entropy instances. - poll_interval: 1s + poll_interval: 1000000000 # instrumentation/metrics related configurations. telemetry: diff --git a/go.mod b/go.mod index f9c597da..da0739f0 100644 --- a/go.mod +++ b/go.mod @@ -1,196 +1,236 @@ -module github.com/odpf/entropy +module github.com/goto/entropy -go 1.18 +go 1.22.0 + +toolchain go1.22.3 require ( - contrib.go.opencensus.io/exporter/ocagent v0.7.0 - contrib.go.opencensus.io/exporter/prometheus v0.4.1 - github.com/Masterminds/squirrel v1.5.2 - github.com/google/go-cmp v0.5.8 - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 - github.com/lib/pq v1.10.4 + github.com/MakeNowJust/heredoc v1.0.0 + github.com/Masterminds/squirrel v1.5.4 + github.com/ghodss/yaml v1.0.0 + github.com/go-playground/validator/v10 v10.15.4 + github.com/google/go-cmp v0.6.0 + github.com/gorilla/mux v1.8.0 + github.com/goto/salt v0.3.7 + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 + github.com/jmoiron/sqlx v1.3.5 + github.com/lib/pq v1.10.9 github.com/mcuadros/go-defaults v1.2.0 - github.com/newrelic/go-agent/v3 v3.17.0 + github.com/mitchellh/mapstructure v1.5.0 + github.com/newrelic/go-agent/v3 v3.25.1 github.com/newrelic/go-agent/v3/integrations/nrgorilla v1.1.1 - github.com/newrelic/go-agent/v3/integrations/nrgrpc v1.3.1 - github.com/newrelic/newrelic-opencensus-exporter-go v0.4.0 - github.com/odpf/salt v0.2.1 - github.com/rs/xid v1.2.1 - github.com/spf13/cobra v1.4.0 - github.com/stretchr/testify v1.7.1 + github.com/newrelic/go-agent/v3/integrations/nrgrpc v1.4.1 + github.com/rs/xid v1.5.0 + github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.10.0 github.com/xeipuuv/gojsonschema v1.2.0 - go.buf.build/odpf/gw/odpf/proton v1.1.122 - go.buf.build/odpf/gwv/odpf/proton v1.1.172 - go.opencensus.io v0.23.0 - go.uber.org/zap v1.21.0 - google.golang.org/grpc v1.46.2 - google.golang.org/protobuf v1.28.0 + 
go.opencensus.io v0.24.0 + go.opentelemetry.io/otel v1.34.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0 + go.opentelemetry.io/otel/exporters/prometheus v0.56.0 + go.opentelemetry.io/otel/metric v1.34.0 + go.opentelemetry.io/otel/sdk v1.34.0 + go.opentelemetry.io/otel/sdk/metric v1.34.0 + go.uber.org/zap v1.26.0 + google.golang.org/api v0.141.0 + google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 + google.golang.org/grpc v1.60.1 + google.golang.org/protobuf v1.36.3 gopkg.in/yaml.v2 v2.4.0 - helm.sh/helm/v3 v3.9.0 - k8s.io/api v0.24.0 - k8s.io/apimachinery v0.24.0 - k8s.io/client-go v0.24.0 + gotest.tools v2.2.0+incompatible + helm.sh/helm/v3 v3.12.3 + k8s.io/api v0.28.2 + k8s.io/apimachinery v0.28.2 + k8s.io/client-go v0.28.2 + sigs.k8s.io/kind v0.23.0 ) require ( - github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect - github.com/cli/safeexec v1.0.0 // indirect - github.com/go-kit/log v0.1.0 // indirect - github.com/go-logfmt/logfmt v0.5.0 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 // indirect - github.com/prometheus/statsd_exporter v0.21.0 // indirect - google.golang.org/api v0.62.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/authzed/authzed-go v0.7.0 // indirect + github.com/authzed/grpcutil v0.0.0-20230908193239-4286bb1d6403 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d // indirect + github.com/containerd/continuity v0.4.2 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/jzelinskie/stringz v0.0.0-20210414224931-d6a8ce844a70 // indirect + github.com/opencontainers/runc v1.1.5 // indirect + github.com/ory/dockertest/v3 v3.9.1 + golang.org/x/mod v0.17.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect ) require ( - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/BurntSushi/toml v1.0.0 // indirect - github.com/MakeNowJust/heredoc v1.0.0 + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/BurntSushi/toml v1.3.2 github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.1.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.2 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/alecthomas/chroma v0.8.2 // indirect - github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/alecthomas/chroma v0.10.0 // indirect + github.com/asaskevich/govalidator 
v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/briandowns/spinner v1.18.0 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
-	github.com/charmbracelet/glamour v0.3.0 // indirect
-	github.com/containerd/containerd v1.6.3 // indirect
-	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
-	github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 // indirect
+	github.com/briandowns/spinner v1.23.0 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/chai2010/gettext-go v1.0.2 // indirect
+	github.com/charmbracelet/glamour v0.6.0 // indirect
+	github.com/containerd/containerd v1.7.6 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
+	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/dlclark/regexp2 v1.2.0 // indirect
-	github.com/docker/cli v20.10.11+incompatible // indirect
-	github.com/docker/distribution v2.8.1+incompatible // indirect
-	github.com/docker/docker v20.10.14+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.6.4 // indirect
+	github.com/dlclark/regexp2 v1.10.0 // indirect
+	github.com/docker/cli v24.0.6+incompatible // indirect
+	github.com/docker/distribution v2.8.2+incompatible // indirect
+	github.com/docker/docker v24.0.6+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.8.0 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
-	github.com/docker/go-units v0.4.0 // indirect
-	github.com/emicklei/go-restful v2.9.5+incompatible // indirect
-	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
-	github.com/fatih/color v1.13.0 // indirect
-	github.com/fsnotify/fsnotify v1.5.1 // indirect
-	github.com/ghodss/yaml v1.0.0
-	github.com/go-errors/errors v1.0.1 // indirect
-	github.com/go-gorp/gorp/v3 v3.0.2 // indirect
-	github.com/go-logr/logr v1.2.2 // indirect
-	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.19.5 // indirect
-	github.com/go-openapi/swag v0.19.14 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
+	github.com/evanphx/json-patch v5.7.0+incompatible // indirect
+	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
+	github.com/fatih/color v1.15.0 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/go-errors/errors v1.5.0 // indirect
+	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-openapi/jsonpointer v0.20.0 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/swag v0.22.4 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/btree v1.0.1 // indirect
-	github.com/google/gnostic v0.5.7-v3refs // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/google/btree v1.1.2 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
-	github.com/google/uuid v1.3.0 // indirect
+	github.com/google/uuid v1.6.0
 	github.com/gorilla/css v1.0.0 // indirect
-	github.com/gorilla/mux v1.8.0
 	github.com/gosuri/uitable v0.0.4 // indirect
-	github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.0
+	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/huandu/xstrings v1.3.2 // indirect
-	github.com/imdario/mergo v0.3.12 // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/huandu/xstrings v1.4.0 // indirect
+	github.com/imdario/mergo v0.3.16 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jeremywohl/flatten v1.0.1 // indirect
-	github.com/jmoiron/sqlx v1.3.5
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.13.6 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
-	github.com/magiconair/properties v1.8.5 // indirect
-	github.com/mailru/easyjson v0.7.6 // indirect
-	github.com/mattn/go-colorable v0.1.12 // indirect
-	github.com/mattn/go-isatty v0.0.14 // indirect
-	github.com/mattn/go-runewidth v0.0.13 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
-	github.com/microcosm-cc/bluemonday v1.0.6 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-runewidth v0.0.15 // indirect
+	github.com/microcosm-cc/bluemonday v1.0.25 // indirect
 	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
-	github.com/mitchellh/go-wordwrap v1.0.0 // indirect
-	github.com/mitchellh/mapstructure v1.5.0
+	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/locker v1.0.1 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
-	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
+	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/muesli/reflow v0.2.0 // indirect
-	github.com/muesli/termenv v0.9.0 // indirect
+	github.com/muesli/reflow v0.3.0 // indirect
+	github.com/muesli/termenv v0.15.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/oklog/run v1.1.0 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
-	github.com/pelletier/go-toml v1.9.4 // indirect
+	github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
+	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.12.1 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.32.1 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
-	github.com/rivo/uniseg v0.2.0 // indirect
-	github.com/rubenv/sql-migrate v1.1.1 // indirect
-	github.com/russross/blackfriday v1.5.2 // indirect
-	github.com/schollz/progressbar/v3 v3.8.5 // indirect
-	github.com/shopspring/decimal v1.2.0 // indirect
-	github.com/sirupsen/logrus v1.8.1 // indirect
-	github.com/spf13/afero v1.6.0 // indirect
-	github.com/spf13/cast v1.4.1 // indirect
+	github.com/prometheus/client_golang v1.20.5 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/common v0.61.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/rivo/uniseg v0.4.4 // indirect
+	github.com/rubenv/sql-migrate v1.5.2 // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/schollz/progressbar/v3 v3.13.1 // indirect
+	github.com/shopspring/decimal v1.3.1 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/cast v1.5.1 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.8.1 // indirect
-	github.com/stretchr/objx v0.2.0 // indirect
-	github.com/subosito/gotenv v1.2.0 // indirect
-	github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
+	github.com/spf13/viper v1.16.0 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/subosito/gotenv v1.6.0 // indirect
+	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
-	github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect
-	github.com/yuin/goldmark v1.4.1 // indirect
-	github.com/yuin/goldmark-emoji v1.0.1 // indirect
-	go.buf.build/odpf/gw/grpc-ecosystem/grpc-gateway v1.1.44 // indirect
-	go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
-	go.uber.org/atomic v1.7.0 // indirect
-	go.uber.org/multierr v1.6.0 // indirect
-	golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect
-	golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect
-	golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a // indirect
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
-	golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
-	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3 // indirect
+	github.com/xlab/treeprint v1.2.0 // indirect
+	github.com/yuin/goldmark v1.5.6 // indirect
+	github.com/yuin/goldmark-emoji v1.0.2 // indirect
+	go.starlark.net v0.0.0-20230912135651-745481cf39ed // indirect
+	go.uber.org/multierr v1.11.0 // indirect
+	golang.org/x/crypto v0.30.0 // indirect
+	golang.org/x/net v0.32.0 // indirect
+	golang.org/x/oauth2 v0.24.0
+	golang.org/x/sync v0.10.0
+	golang.org/x/sys v0.29.0 // indirect
+	golang.org/x/term v0.27.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/ini.v1 v1.62.0 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-	k8s.io/apiextensions-apiserver v0.24.0 // indirect
-	k8s.io/apiserver v0.24.0 // indirect
-	k8s.io/cli-runtime v0.24.0 // indirect
-	k8s.io/component-base v0.24.0 // indirect
-	k8s.io/klog/v2 v2.60.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
-	k8s.io/kubectl v0.24.0 // indirect
-	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
-	oras.land/oras-go v1.1.0 // indirect
-	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
-	sigs.k8s.io/kustomize/api v0.11.4 // indirect
-	sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1
+	k8s.io/apiextensions-apiserver v0.28.2 // indirect
+	k8s.io/apiserver v0.28.2 // indirect
+	k8s.io/cli-runtime v0.28.2 // indirect
+	k8s.io/component-base v0.28.2 // indirect
+	k8s.io/klog/v2 v2.100.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d // indirect
+	k8s.io/kubectl v0.28.2 // indirect
+	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	oras.land/oras-go v1.2.4 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+	sigs.k8s.io/kustomize/api v0.14.0 // indirect
+	sigs.k8s.io/kustomize/kyaml v0.14.3 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
+
+require (
+	cloud.google.com/go/compute/metadata v0.3.0 // indirect
+	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
+	github.com/Microsoft/hcsshim v0.11.0 // indirect
+	github.com/alessio/shellescape v1.4.1 // indirect
+	github.com/andybalholm/brotli v1.0.5 // indirect
+	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
+	github.com/cli/safeexec v1.0.1 // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/s2a-go v0.1.7 // indirect
+	github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/leodido/go-urn v1.2.4 // indirect
+	github.com/newrelic/csec-go-agent v0.4.0 // indirect
+	github.com/pelletier/go-toml v1.9.5 // indirect
+	github.com/valyala/bytebufferpool v1.0.0 // indirect
+	github.com/valyala/fasthttp v1.50.0 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/runtime v0.59.0
+	go.opentelemetry.io/otel/trace v1.34.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
+	golang.org/x/arch v0.5.0 // indirect
+	google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
+)
diff --git a/go.sum b/go.sum
index ee01b518..28aaf738 100644
--- a/go.sum
+++ b/go.sum
@@ -1,10 +1,9 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
 cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
 cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
 cloud.google.com/go v0.50.0/go.mod
h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -17,617 +16,245 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/spanner v1.28.0/go.mod h1:7m6mtQZn/hMbMfx62ct5EWrGND4DNqkXyrmBPRS+OJo= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/ocagent v0.7.0 h1:BEfdCTXfMV30tLZD8c9n64V/tIZX5+9sXiuFLnrr1k8= -contrib.go.opencensus.io/exporter/ocagent 
v0.7.0/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY= -contrib.go.opencensus.io/exporter/prometheus v0.4.1 h1:oObVeKo2NxpdF/fIfrPsNj6K0Prg0R0mHM+uANlYMiM= -contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= -github.com/AlecAivazis/survey/v2 v2.3.5/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= 
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= -github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= -github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Masterminds/squirrel v1.5.2 h1:UiOEi2ZX4RCSkpiNDQN5kro/XIBpSRk9iTqdIRPzUXE= -github.com/Masterminds/squirrel v1.5.2/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio 
v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= -github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= -github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= -github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY= -github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod 
h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.0 h1:7EFNIY4igHEXUdj1zXgAyU3fLc7QfOKHbkldRVTBdiM= +github.com/Microsoft/hcsshim v0.11.0/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38 h1:smF2tmSOzy2Mm+0dGI2AIUHY+w0BUc+4tn40djz7+6U= -github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38/go.mod h1:r7bzyVFMNntcxPZXK3/+KdruV1H5KSlyVY0gc+NgInI= -github.com/alecthomas/chroma v0.8.2 h1:x3zkuE2lUk/RIekyAJ3XRqSCP4zwWDfcw/YJCuCAACg= -github.com/alecthomas/chroma v0.8.2/go.mod h1:sko8vR34/90zvl5QdcUdvzL3J8NKjAUx9va9jPuFNoM= -github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721 h1:JHZL0hZKJ1VENNfmXvHbgYlbUOvpzYzvy2aZU5gXVeo= -github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721/go.mod h1:QO9JBoKquHd+jz9nshCh40fOfO+JzsoXy8qTHF68zU0= -github.com/alecthomas/kong v0.2.4/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= -github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897 h1:p9Sln00KOTlrYkxI1zYWl1QLnEqAqEARBEYa8FQnQcY= -github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= +github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek= +github.com/alecthomas/chroma v0.10.0/go.mod h1:jtJATyUxlIORhUOFNA9NZDWGAQ8wpxQQqNSB4rjA/1s= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= -github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= +github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= -github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= -github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= -github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= -github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= 
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= -github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/authzed/authzed-go v0.7.0 h1:etnzHUAIyxGiEaFYJPYkHTHzxCYWEGzZQMgVLe4xRME= +github.com/authzed/authzed-go v0.7.0/go.mod h1:bmjzzIQ34M0+z8NO9SLjf4oA0A9Ka9gUWVzeSbD0E7c= +github.com/authzed/grpcutil v0.0.0-20230908193239-4286bb1d6403 h1:bQeIwWWRI9bl93poTqpix4sYHi+gnXUPK7N6bMtXzBE= +github.com/authzed/grpcutil v0.0.0-20230908193239-4286bb1d6403/go.mod h1:s3qC7V7XIbiNWERv7Lfljy/Lx25/V1Qlexb0WJuA8uQ= +github.com/aymanbagabas/go-osc52 v1.0.3/go.mod h1:zT8H+Rk4VSabYN90pWyugflM3ZhpTZNC7cASDfUCdT4= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur 
v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/briandowns/spinner v1.18.0 h1:SJs0maNOs4FqhBwiJ3Gr7Z1D39/rukIVGQvpNZVHVcM= -github.com/briandowns/spinner v1.18.0/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= +github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod 
h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/charmbracelet/glamour v0.3.0 h1:3H+ZrKlSg8s+WU6V7eF2eRVYt8lCueffbi7r2+ffGkc= -github.com/charmbracelet/glamour v0.3.0/go.mod h1:TzF0koPZhqq0YVBNL100cPHznAAjVj7fksX2RInwjGw= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d h1:S2NE3iHSwP0XV47EEXL8mWmRdEfGscSJ+7EgePNgt0s= +github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= 
+github.com/charmbracelet/glamour v0.6.0 h1:wi8fse3Y7nfcabbbDuwolqTqMQPMnVPeZhDM273bISc= +github.com/charmbracelet/glamour v0.6.0/go.mod h1:taqWV4swIMMbWALc0m7AfE9JkPSU8om2538k9ITBxOc= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= -github.com/cli/safeexec v1.0.0 h1:0VngyaIyqACHdcMNWfo6+KdUYnqEr2Sg+bSP1pdF+dI= -github.com/cli/safeexec v1.0.0/go.mod h1:Z/D4tTN8Vs5gXYHDCbaM1S/anmEDnJb1iW0+EJ5zx3Q= +github.com/cli/safeexec v1.0.1 h1:e/C79PbXF4yYTN/wauC4tviMxEV13BwljGj0N9j+N00= +github.com/cli/safeexec v1.0.1/go.mod h1:Z/D4tTN8Vs5gXYHDCbaM1S/anmEDnJb1iW0+EJ5zx3Q= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM= -github.com/cockroachdb/datadriven 
v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= -github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= -github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= -github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= -github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= 
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= -github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= -github.com/containerd/containerd v1.6.3 h1:JfgUEIAH07xDWk6kqz0P3ArZt+KJ9YeihSC9uyFtSKg= -github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= 
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= -github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= 
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= -github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/containerd/containerd v1.7.6 h1:oNAVsnhPoy4BTPQivLgTzI9Oleml9l/+eYIDYXRCYo8= +github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= -github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= -github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ= 
-github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964/go.mod h1:Xd9hchkHSWYkEqJwUGisez3G1QY8Ryz0sdWrLPMGjLk= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0= -github.com/distribution/distribution/v3 v3.0.0-20211118083504-a29a3c99a684 h1:DBZ2sN7CK6dgvHVpQsQj4sRMCbWTmd17l+5SUCjnQSY= -github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk= -github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.11+incompatible h1:tXU1ezXcruZQRrMP8RN2z9N91h+6egZTS1gsPsKantc= -github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.14+incompatible 
h1:+T9/PRYWNDo5SZl5qS1r9Mo/0Q8AwxKKPtu9S1yxM0w= -github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= -github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= +github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= +github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= +github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= +github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= github.com/docker/libtrust 
v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch 
v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify 
v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= -github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-errors/errors v1.5.0 h1:/EuijeGOu7ckFxzhkj4CXJ8JaenxK7bKUxpPYqeLHqQ= +github.com/go-errors/errors v1.5.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp/v3 v3.0.2 h1:ULqJXIekoqMx29FI5ekXXFoH1dT2Vc8UhnRzBg+Emz4= -github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-gorp/gorp/v3 v3.1.0 
h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= 
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.15.4 h1:zMXza4EpOdooxPel5xDqXEdXG5r+WggpvnAKMsalBjs= +github.com/go-playground/validator/v10 v10.15.4/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= 
-github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod 
h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= -github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= -github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.2 
h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -640,9 +267,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -658,24 +282,16 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 
h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -687,21 +303,15 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -711,331 +321,160 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod 
h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= -github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0/go.mod h1:qrJPVzv9YlhsrxJc3P/Q85nr0w1lIRikTl4JlhdDH5w= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.0 h1:ESEyqQqXXFIcImj/BE8oKEX37Zsuceb2cZI+EL/zNCY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.0/go.mod h1:XnLCLFp3tjoZJszVKjfpyAK6J8sYIcQXWQxmqLWF21I= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/goto/salt v0.3.7 h1:XwnKDEMXkYhStcyLtw7SjHg4TRseRt5OSqOV8DUqfBI= +github.com/goto/salt v0.3.7/go.mod h1:KR7LfsHQY3ae1iBEZDOlDrDZQZVRtDNdwpBttrcT5sE= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= 
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= -github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= -github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod 
h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.5.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= -github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/pgx/v4 v4.9.0/go.mod h1:MNGWmViCgqbZck9ujOOBN63gK9XVGILXWCvKLGKmnms= -github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle 
v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jeremywohl/flatten v1.0.1 h1:LrsxmB3hfwJuE+ptGOijix1PIfOoKLJ3Uee/mzbgtrs= github.com/jeremywohl/flatten v1.0.1/go.mod h1:4AmD/VxjWcI5SRB0n6szE2A6s2fsNHDLO0nAlMHgfLQ= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= -github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/jzelinskie/stringz v0.0.0-20210414224931-d6a8ce844a70 h1:thTca5Eyouk5CEcJ75Cbw9CSAGE7TAc6rIi+WgHWpOE= +github.com/jzelinskie/stringz v0.0.0-20210414224931-d6a8ce844a70/go.mod h1:hHYbgxJuNLRw91CmpuFsYEOyQqpDVFg8pvEh23vy4P0= github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= -github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/k2io/hookingo v1.0.5 h1:MAuYIjpOf2IFs7UqEDrHntNBswWg7z7/I2XMQHogEio= +github.com/k2io/hookingo v1.0.5/go.mod h1:2L1jdNjdB3NkbzSVv9Q5fq7SJhRkWyAhe65XsAp5iXk= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= 
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 
-github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= -github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= -github.com/markbates/pkger v0.15.1/go.mod 
h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod 
h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.3/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= -github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mcuadros/go-defaults v1.2.0 h1:FODb8WSf0uGaY8elWJAkoLL0Ri6AlZ1bFlenk56oZtc= github.com/mcuadros/go-defaults v1.2.0/go.mod h1:WEZtHEVIGYVDqkKSWBdWKUVdRyKlMfulPaGDWIVeCWY= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microcosm-cc/bluemonday v1.0.6 h1:ZOvqHKtnx0fUpnbQm3m3zKFWE+DRC+XB1onh8JoEObE= -github.com/microcosm-cc/bluemonday v1.0.6/go.mod h1:HOT/6NaBlR0f9XlxD3zolN6Z3N8Lp4pvhp+jLS5ihnI= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= +github.com/microcosm-cc/bluemonday v1.0.21/go.mod h1:ytNkv4RrDrLJ2pqlsSI46O6IVXmZOBBD4SaJyDwwTkM= +github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg= 
+github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE= +github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -1043,17 +482,11 @@ github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= 
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1063,476 +496,247 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/muesli/reflow v0.2.0 h1:2o0UBJPHHH4fa2GCXU4Rg4DwOtWPMekCeyc5EWbAQp0= -github.com/muesli/reflow v0.2.0/go.mod h1:qT22vjVmM9MIUeLgsVYe/Ye7eZlbv9dZjL3dVhUqLX8= -github.com/muesli/termenv v0.8.1/go.mod h1:kzt/D/4a88RoheZmwfqorY3A+tnsSMA9HJC/fQSFKo0= -github.com/muesli/termenv v0.9.0 h1:wnbOaGz+LUR3jNT0zOzinPnyDaCZUQRZj9GxK8eRVl8= -github.com/muesli/termenv v0.9.0/go.mod h1:R/LzAKf+suGs4IsO95y7+7DpFHO0KABgnZqtlyx2mBw= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/muesli/reflow v0.3.0 
h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.13.0/go.mod h1:sP1+uffeLaEYpyOTb8pLCUctGcGLnoFjSn4YJK5e2bc= +github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= +github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= -github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= +github.com/newrelic/csec-go-agent v0.4.0 h1:xvsVNshn0PztJzD2bnQiR1NkUzv+7LdEmvVFLqOVzco= +github.com/newrelic/csec-go-agent v0.4.0/go.mod h1:dM8F4FOZ/CX3h2zMQ5npGqNuND2E0noklLjAx8YUXBU= github.com/newrelic/go-agent/v3 v3.0.0/go.mod h1:H28zDNUC0U/b7kLoY4EFOhuth10Xu/9dchozUiOseQQ= -github.com/newrelic/go-agent/v3 v3.12.0/go.mod h1:1A1dssWBwzB7UemzRU6ZVaGDsI+cEn5/bNxI0wiYlIc= -github.com/newrelic/go-agent/v3 v3.17.0 h1:FLhxjckKKoJWB2rR14oNm+mYZLpktfEy+JKRSF5WgXw= -github.com/newrelic/go-agent/v3 v3.17.0/go.mod h1:BFJOlbZWRlPTXKYIC1TTTtQKTnYntEJaU0VU507hDc0= +github.com/newrelic/go-agent/v3 v3.25.1 h1:Fa+4apO08bcGJk9aOB0TlnacAOrXS4FzMYJzoG0ihA8= +github.com/newrelic/go-agent/v3 v3.25.1/go.mod h1:MANAXqchXM8ko+EXPZ+6mzX243/lehYwJWq8HOV2ytc= github.com/newrelic/go-agent/v3/integrations/nrgorilla v1.1.1 h1:9SyybWTkOSffuwCAp8oUcMZghFkGLWZUkPC/38AvjxU= github.com/newrelic/go-agent/v3/integrations/nrgorilla v1.1.1/go.mod h1:1XnCVdRSKjS5ikMycFh7VKXBkk0oYPaKQb+sd6aSCoA= -github.com/newrelic/go-agent/v3/integrations/nrgrpc v1.3.1 h1:/ar1Omo9luapTJYWXt86oQGBpWwpWF92x+UuYU9v/7o= -github.com/newrelic/go-agent/v3/integrations/nrgrpc v1.3.1/go.mod h1:2q0u6qkNJ4ClDt920A4r+NpcO370lFze1NF4OPJjAks= -github.com/newrelic/newrelic-opencensus-exporter-go v0.4.0 h1:BjzhyzSrzc8/WtyZDWBF8XATW4M92EoZiy38kgL3gfo= -github.com/newrelic/newrelic-opencensus-exporter-go v0.4.0/go.mod h1:gSMlmRnmdRq5c2NTNuh+JtBGXSvnJDqIudxiXwQ07m0= -github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY= -github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= +github.com/newrelic/go-agent/v3/integrations/nrgrpc v1.4.1 
h1:G9D/jUoEmIT8zCv5PU+1jJbXyzWnygiMoRZrmnEKSAo= +github.com/newrelic/go-agent/v3/integrations/nrgrpc v1.4.1/go.mod h1:MEK4BX1J5e1JW7KtujvhC5zAybelhIqejPFzy4HxO9M= +github.com/newrelic/go-agent/v3/integrations/nrsecurityagent v1.1.0 h1:gqkTDYUHWUyiG+u0PJQCRh98rcHLxP/w7GtIbJDVULY= +github.com/newrelic/go-agent/v3/integrations/nrsecurityagent v1.1.0/go.mod h1:3wugGvRmOVYov/08y+D8tB1uYIZds5bweVdr5vo4Gbs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/odpf/salt v0.2.1 h1:91TRXZ85XKeKUwq6jotcAD5eo40kP1GS3xZMvXK9xR0= -github.com/odpf/salt v0.2.1/go.mod h1:ZmDaHPtlwvOlluzv+qiC3qZj99hmfxlFRMqfqZx5IP8= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= -github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= +github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= +github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= +github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod 
h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= 
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs 
v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= -github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/rubenv/sql-migrate v1.1.1 h1:haR5Hn8hbW9/SpAICrXoZqXnywS7Q5WijwkQENPeNWY= -github.com/rubenv/sql-migrate v1.1.1/go.mod 
h1:/7TZymwxN8VWumcIxw1jjHEcR1djpdkMHQPT4FWdnbQ= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= +github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/schollz/progressbar/v3 v3.8.5 h1:VcmmNRO+eFN3B0m5dta6FXYXY+MEJmXdWoIS+jjssQM= -github.com/schollz/progressbar/v3 v3.8.5/go.mod h1:ewO25kD7ZlaJFTvMeOItkOZa8kXu1UvFs379htE8HMQ= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= -github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/schollz/progressbar/v3 v3.13.1 h1:o8rySDYiQ59Mwzy2FELeHY5ZARXZTVJC7iHD6PEFUiE= +github.com/schollz/progressbar/v3 v3.13.1/go.mod h1:xvrbki8kfT1fzWzBT/UZd9L6GA+jdL7HAgq2RFnO6fQ= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod 
h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/afero 
v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.50.0 h1:H7fweIlBm0rXLs2q0XbalvJ6r0CUPFWK3/bB4N13e9M= +github.com/valyala/fasthttp v1.50.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= 
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.3/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark-emoji v1.0.1 h1:ctuWEyzGBwiucEqxzwe0SOYDXPAucOrE9NQC18Wa1os= +github.com/yuin/goldmark v1.3.7/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.5.2/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.5.6 h1:COmQAWTCcGetChm3Ig7G/t8AFAN00t+o8Mt4cf7JpwA= +github.com/yuin/goldmark v1.5.6/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark-emoji v1.0.1/go.mod h1:2w1E6FEWLcDQkoTE+7HU6QF1F6SLlNGjRIBbIZQFqkQ= +github.com/yuin/goldmark-emoji v1.0.2 h1:c/RgTShNgHTtc6xdz2KKI74jJr6rWi7FPgnP9GAsO5s= +github.com/yuin/goldmark-emoji v1.0.2/go.mod h1:RhP/RWpexdp+KHs7ghKnifRoIs/Bq4nDS7tRbCkOwKY= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod 
h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= -go.buf.build/odpf/gw/envoyproxy/protoc-gen-validate v1.1.6/go.mod h1:Z+auCoGBL8lg1aQQgGe7R3HLnYKf6nbtTawTt0WmFCo= -go.buf.build/odpf/gw/grpc-ecosystem/grpc-gateway v1.1.35/go.mod h1:/LuddrGPi0fwj7ay6Orutt8oFfPz8Y3c8qdBkacJq1A= -go.buf.build/odpf/gw/grpc-ecosystem/grpc-gateway v1.1.44 h1:ya+Pdx7IBc+Uf+UImPPecJbdI7gpMz9gJI6/LUNjsbE= -go.buf.build/odpf/gw/grpc-ecosystem/grpc-gateway v1.1.44/go.mod h1:/LuddrGPi0fwj7ay6Orutt8oFfPz8Y3c8qdBkacJq1A= -go.buf.build/odpf/gw/odpf/proton v1.1.9/go.mod h1:I9E8CF7w/690vRNWqBU6qDcUbi3Pi2THdn1yycBVTDQ= -go.buf.build/odpf/gw/odpf/proton v1.1.122 h1:6NM4D8VwKIdq6F0A5nXnmxPp7LnzuwsGCeVxi3E1HOI= -go.buf.build/odpf/gw/odpf/proton v1.1.122/go.mod h1:FySqyI0YPPldpzXULKDcIC/bMJIdGaO6j36i1ZKJSvE= -go.buf.build/odpf/gwv/envoyproxy/protoc-gen-validate v1.1.7/go.mod h1:2Tg6rYIoDhpl39Zd2+WBOF9uG4XxAOs0bK2Z2/bwTOc= -go.buf.build/odpf/gwv/grpc-ecosystem/grpc-gateway v1.1.46/go.mod h1:UrBCdmHgaY/pLapYUMOq01c1yuzwT8AEBTsgpmzq2zo= -go.buf.build/odpf/gwv/odpf/proton v1.1.172 h1:cGk4ctsVhBK4d6mV+QVrJD0rWkXtDO+ogCA8l3BCkhk= -go.buf.build/odpf/gwv/odpf/proton v1.1.172/go.mod h1:V6NNZKrRPHjMkIPiSXvwUHks0D8bUGPXAjXUaujG/90= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.mongodb.org/mongo-driver v1.7.0/go.mod 
h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= -go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/runtime v0.59.0 h1:rfi2MMujBc4yowE0iHckZX4o4jg6SA67EnFVL8ldVvU= +go.opentelemetry.io/contrib/instrumentation/runtime v0.59.0/go.mod h1:IO/gfPEcQYpOpPxn1OXFp1DvRY0viP8ONMedXLjjHIU= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0 h1:tfil6di0PoNV7FZdsCS7A5izZoVVQ7AuXtyekbOpG/I= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.45.0/go.mod h1:AKFZIEPOnqB00P63bTjOiah4ZTaRzl1TKwUWpZdYUHI= +go.opentelemetry.io/otel/exporters/prometheus v0.56.0 h1:GnCIi0QyG0yy2MrJLzVrIM7laaJstj//flf1zEJCG+E= +go.opentelemetry.io/otel/exporters/prometheus v0.56.0/go.mod h1:JQcVZtbIIPM+7SWBB+T6FK+xunlyidwLp++fN0sUaOk= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.starlark.net v0.0.0-20230912135651-745481cf39ed h1:kNt8RXSIU6IRBO9MP3m+6q3WpyBHQQXqSktcyVKDPOQ= +go.starlark.net v0.0.0-20230912135651-745481cf39ed/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y= +golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod 
h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= 
-golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1544,7 +748,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1555,38 +758,23 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1597,44 +785,23 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1643,221 +810,117 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod 
h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools 
v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1877,45 +940,28 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1929,44 +975,26 @@ google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= 
-google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.62.0 h1:PhGymJMXfGBzc4lBRmrx9+1w4w2wEzURHNGF/sD/xGc= -google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= -google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/api v0.141.0 h1:Df6vfMgDoIM6ss0m7H4MPwFwY87WNXHfBIda/Bmfl4E= +google.golang.org/api v0.141.0/go.mod h1:iZqLkdPlXKyG0b90eu6KxVSE4D/ccRF2e/doKD2CnQQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1975,7 +1003,6 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1987,62 +1014,29 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
-google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3 h1:q1kiSVscqoDeqTF27eQ2NnLLDmqF0I373qQNXYMy0fo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -2052,26 +1046,11 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2085,69 +1064,35 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod 
h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/datatypes v1.0.0/go.mod h1:aKpJ+RNhLXWeF5OAdxfzBwT1UPw1wseSchF0AY3/lSw= -gorm.io/driver/mysql v1.0.3/go.mod h1:twGxftLBlFgNVNakL7F+P/x9oYqoymG3YYT8cAfI9oI= -gorm.io/driver/postgres v1.0.5/go.mod h1:qrD92UurYzNctBMVCJ8C3VQEjffEuphycXtxOudXNCA= -gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg= -gorm.io/driver/sqlite v1.1.3/go.mod h1:AKDgRWk8lcSQSw+9kxCJnX/yySj8G3rdwYlU57cB45c= -gorm.io/driver/sqlserver v1.0.5/go.mod h1:WI/bfZ+s9TigYXe3hb3XjNaUP0TqmTdXl11pECyLATs= -gorm.io/gorm v1.20.1/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.20.2/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.20.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.20.5/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.20.12/go.mod 
h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= -gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= -helm.sh/helm/v3 v3.9.0 h1:qDSWViuF6SzZX5s5AB/NVRGWmdao7T5j4S4ebIkMGag= -helm.sh/helm/v3 v3.9.0/go.mod h1:fzZfyslcPAWwSdkXrXlpKexFeE2Dei8N27FFQWt+PN0= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +helm.sh/helm/v3 v3.12.3 h1:5y1+Sbty12t48T/t/CGNYUIME5BJ0WKfmW/sobYqkFg= +helm.sh/helm/v3 v3.12.3/go.mod h1:KPKQiX9IP5HX7o5YnnhViMnNuKiL/lJBVQ47GHe1R0k= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2155,128 +1100,42 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/api v0.24.0 h1:J0hann2hfxWr1hinZIDefw7Q96wmCBx6SSB8IY0MdDg= -k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= -k8s.io/apiextensions-apiserver v0.24.0 h1:JfgFqbA8gKJ/uDT++feAqk9jBIwNnL9YGdQvaI9DLtY= -k8s.io/apiextensions-apiserver v0.24.0/go.mod h1:iuVe4aEpe6827lvO6yWQVxiPSpPoSKVjkq+MIdg84cM= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= -k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apimachinery v0.24.0 h1:ydFCyC/DjCvFCHK5OPMKBlxayQytB8pxy8YQInd5UyQ= -k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= -k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= -k8s.io/apiserver v0.24.0 h1:GR7kGsjOMfilRvlG3Stxv/3uz/ryvJ/aZXc5pqdsNV0= -k8s.io/apiserver v0.24.0/go.mod 
h1:WFx2yiOMawnogNToVvUYT9nn1jaIkMKj41ZYCVycsBA= -k8s.io/cli-runtime v0.24.0 h1:ot3Qf49T852uEyNApABO1UHHpFIckKK/NqpheZYN2gM= -k8s.io/cli-runtime v0.24.0/go.mod h1:9XxoZDsEkRFUThnwqNviqzljtT/LdHtNWvcNFrAXl0A= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/client-go v0.24.0 h1:lbE4aB1gTHvYFSwm6eD3OF14NhFDKCejlnsGYlSJe5U= -k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw= -k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= -k8s.io/component-base v0.24.0 h1:h5jieHZQoHrY/lHG+HyrSbJeyfuitheBvqvKwKHVC0g= -k8s.io/component-base v0.24.0/go.mod h1:Dgazgon0i7KYUsS8krG8muGiMVtUZxG037l1MKyXgrA= -k8s.io/component-helpers v0.24.0/go.mod h1:Q2SlLm4h6g6lPTC9GMMfzdywfLSvJT2f1hOnnjaWD8c= -k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= -k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= -k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod 
h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/kubectl v0.24.0 h1:nA+WtMLVdXUs4wLogGd1mPTAesnLdBpCVgCmz3I7dXo= -k8s.io/kubectl v0.24.0/go.mod h1:pdXkmCyHiRTqjYfyUJiXtbVNURhv0/Q1TyRhy2d5ic0= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/metrics v0.24.0/go.mod h1:jrLlFGdKl3X+szubOXPG0Lf2aVxuV3QJcbsgVRAM6fI= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= -modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= -modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= -modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= -modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= -modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= -modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= -modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= -modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= -modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= -modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs= -modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= -modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= -modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= -oras.land/oras-go v1.1.0 h1:tfWM1RT7PzUwWphqHU6ptPU3ZhwVnSw/9nEGf519rYg= -oras.land/oras-go v1.1.0/go.mod h1:1A7vR/0KknT2UkJVWh+xMi95I/AhK8ZrxrnUSmXN0bQ= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= +k8s.io/apiextensions-apiserver v0.28.2 h1:J6/QRWIKV2/HwBhHRVITMLYoypCoPY1ftigDM0Kn+QU= 
+k8s.io/apiextensions-apiserver v0.28.2/go.mod h1:5tnkxLGa9nefefYzWuAlWZ7RZYuN/765Au8cWLA6SRg= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= +k8s.io/apiserver v0.28.2 h1:rBeYkLvF94Nku9XfXyUIirsVzCzJBs6jMn3NWeHieyI= +k8s.io/apiserver v0.28.2/go.mod h1:f7D5e8wH8MWcKD7azq6Csw9UN+CjdtXIVQUyUhrtb+E= +k8s.io/cli-runtime v0.28.2 h1:64meB2fDj10/ThIMEJLO29a1oujSm0GQmKzh1RtA/uk= +k8s.io/cli-runtime v0.28.2/go.mod h1:bTpGOvpdsPtDKoyfG4EG041WIyFZLV9qq4rPlkyYfDA= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= +k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= +k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= +k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d h1:/CFeJBjBrZvHX09rObS2+2iEEDevMWYc1v3aIYAjIYI= +k8s.io/kube-openapi v0.0.0-20230918164632-68afd615200d/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.28.2 h1:fOWOtU6S0smdNjG1PB9WFbqEIMlkzU5ahyHkc7ESHgM= +k8s.io/kubectl v0.28.2/go.mod h1:6EQWTPySF1fn7yKoQZHYf9TPwIl2AygHEcJoxFekr64= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= +oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/kustomize/api v0.11.4 h1:/0Mr3kfBBNcNPOW5Qwk/3eb8zkswCwnqQxxKtmrTkRo= -sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= -sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg= -sigs.k8s.io/kustomize/kyaml v0.13.6 h1:eF+wsn4J7GOAXlvajv6OknSunxpcOBQQqsnPxObtkGs= -sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 
v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/kind v0.23.0 h1:8fyDGWbWTeCcCTwA04v4Nfr45KKxbSPH1WO9K+jVrBg=
+sigs.k8s.io/kind v0.23.0/go.mod h1:ZQ1iZuJLh3T+O8fzhdi3VWcFTzsdXtNv2ppsHc8JQ7s=
+sigs.k8s.io/kustomize/api v0.14.0 h1:6+QLmXXA8X4eDM7ejeaNUyruA1DDB3PVIjbpVhDOJRA=
+sigs.k8s.io/kustomize/api v0.14.0/go.mod h1:vmOXlC8BcmcUJQjiceUbcyQ75JBP6eg8sgoyzc+eLpQ=
+sigs.k8s.io/kustomize/kyaml v0.14.3 h1:WpabVAKZe2YEp/irTSHwD6bfjwZnTtSDewd2BVJGMZs=
+sigs.k8s.io/kustomize/kyaml v0.14.3/go.mod h1:npvh9epWysfQ689Rtt/U+dpOJDTBn8kUnF1O6VzvmZA=
+sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk=
+sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
 sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
 sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/internal/server/middlewares.go b/internal/server/middlewares.go
index f356c5af..74ea4187 100644
--- a/internal/server/middlewares.go
+++ b/internal/server/middlewares.go
@@ -1,7 +1,10 @@
 package server

 import (
+	"bytes"
+	"encoding/json"
 	"fmt"
+	"io"
 	"net/http"
 	"strings"
 	"time"
@@ -22,7 +25,8 @@ const (
 type wrappedWriter struct {
 	http.ResponseWriter
-	Status int
+	Status         int
+	ResponseBuffer *bytes.Buffer
 }

 func (wr *wrappedWriter) WriteHeader(statusCode int) {
@@ -30,6 +34,15 @@ func (wr *wrappedWriter) WriteHeader(statusCode int) {
 	wr.ResponseWriter.WriteHeader(statusCode)
 }

+func (wr *wrappedWriter) Write(data []byte) (int, error) {
+	// write to the buffer to capture the response body
+	if wr.ResponseBuffer != nil {
+		wr.ResponseBuffer.Write(data)
+	}
+	// write to the actual ResponseWriter
+	return wr.ResponseWriter.Write(data)
+}
+
 func withOpenCensus() gorillamux.MiddlewareFunc {
 	return func(next http.Handler) http.Handler {
 		oc := &ochttp.Handler{
@@ -78,32 +91,60 @@ func requestID() gorillamux.MiddlewareFunc {
 	}
 }

-func requestLogger(lg *zap.Logger) gorillamux.MiddlewareFunc {
+func requestLogger() gorillamux.MiddlewareFunc {
 	return func(next http.Handler) http.Handler {
 		return http.HandlerFunc(func(wr http.ResponseWriter, req *http.Request) {
 			t := time.Now()
 			span := trace.FromContext(req.Context())
 			clientID, _, _ := req.BasicAuth()
+
+			wrapped := &wrappedWriter{
+				Status:         http.StatusOK,
+				ResponseWriter: wr,
+				ResponseBuffer: &bytes.Buffer{},
+			}
+
+			bodyBytes, err := io.ReadAll(req.Body)
+			if err != nil {
+				zap.L().Error("error reading request body: %v", zap.String("error", err.Error()))
+				return
+			}
+			reader := io.NopCloser(bytes.NewBuffer(bodyBytes))
+			req.Body = reader
+
+			next.ServeHTTP(wrapped, req)
+
+			if req.URL.Path == "/ping" {
+				return
+			}
+
 			fields := []zap.Field{
-				zap.String("path", req.URL.Path),
+				zap.Time("timestamp", time.Now().UTC()),
 				zap.String("method", req.Method),
+				zap.Int("status", wrapped.Status),
+				zap.String("path", req.URL.Path),
+				zap.Duration("response_time", time.Since(t)),
 				zap.String("request_id", req.Header.Get(headerRequestID)),
 				zap.String("client_id", clientID),
 				zap.String("trace_id", span.SpanContext().TraceID.String()),
 			}
-			wrapped := &wrappedWriter{ResponseWriter: wr}
-			next.ServeHTTP(wrapped, req)
-			fields = append(fields,
-				zap.Duration("response_time", time.Since(t)),
-				zap.Int("status", wrapped.Status),
-			)
+			if len(bodyBytes) > 0 {
+				dst := bytes.NewBuffer(nil)
+				err = json.Compact(dst, bodyBytes)
+				if err != nil {
+					zap.L().Error("error json compacting request body: %v", zap.String("error", err.Error()))
+				} else {
+					fields = append(fields, zap.String("request_body", dst.String()))
+				}
+			}
 			if !is2xx(wrapped.Status) {
-				lg.Warn("request handled with non-2xx response", fields...)
+				fields = append(fields, zap.String("response_body", wrapped.ResponseBuffer.String()))
+				zap.L().Error("request handled with non-2xx response", fields...)
 			} else {
-				lg.Info("request handled", fields...)
+				zap.L().Info("request handled", fields...)
 			}
 		})
 	}
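The requestLogger middleware above drains req.Body with io.ReadAll and then restores it via io.NopCloser(bytes.NewBuffer(bodyBytes)) so the downstream handler can still read the body that gets logged. A minimal, self-contained sketch of that restore-and-forward pattern (handler and variable names here are illustrative, not taken from this patch):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// logBody captures the request body for logging, then puts it back so the
// next handler sees an unread body.
func logBody(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "cannot read body", http.StatusBadRequest)
			return
		}
		r.Body = io.NopCloser(bytes.NewBuffer(body)) // restore for downstream
		fmt.Printf("request body: %s\n", body)
		next.ServeHTTP(w, r)
	})
}

func main() {
	echo := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		b, _ := io.ReadAll(r.Body) // still readable because the middleware restored it
		fmt.Fprintf(w, "handler saw: %s", b)
	})

	srv := httptest.NewServer(logBody(echo))
	defer srv.Close()

	resp, err := http.Post(srv.URL, "application/json", bytes.NewBufferString(`{"x":1}`))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // handler saw: {"x":1}
}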
zap.String("path", req.URL.Path), + zap.Duration("response_time", time.Since(t)), zap.String("request_id", req.Header.Get(headerRequestID)), zap.String("client_id", clientID), zap.String("trace_id", span.SpanContext().TraceID.String()), } - wrapped := &wrappedWriter{ResponseWriter: wr} - next.ServeHTTP(wrapped, req) - fields = append(fields, - zap.Duration("response_time", time.Since(t)), - zap.Int("status", wrapped.Status), - ) + if len(bodyBytes) > 0 { + dst := bytes.NewBuffer(nil) + err = json.Compact(dst, bodyBytes) + if err != nil { + zap.L().Error("error json compacting request body: %v", zap.String("error", err.Error())) + } else { + fields = append(fields, zap.String("request_body", dst.String())) + } + } if !is2xx(wrapped.Status) { - lg.Warn("request handled with non-2xx response", fields...) + fields = append(fields, zap.String("response_body", wrapped.ResponseBuffer.String())) + zap.L().Error("request handled with non-2xx response", fields...) } else { - lg.Info("request handled", fields...) + zap.L().Info("request handled", fields...) } }) } diff --git a/internal/server/server.go b/internal/server/server.go index 90e70373..f3325e0f 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -7,55 +7,64 @@ import ( "time" gorillamux "github.com/gorilla/mux" - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" + grpcmiddleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpczap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" + grpcrecovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" + grpcctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/newrelic/go-agent/v3/integrations/nrgorilla" "github.com/newrelic/go-agent/v3/integrations/nrgrpc" "github.com/newrelic/go-agent/v3/newrelic" - "github.com/odpf/salt/common" - "github.com/odpf/salt/mux" - commonv1 "go.buf.build/odpf/gw/odpf/proton/odpf/common/v1" - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" "go.opencensus.io/plugin/ocgrpc" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/reflection" "google.golang.org/protobuf/encoding/protojson" - modulesv1 "github.com/odpf/entropy/internal/server/v1/modules" - resourcesv1 "github.com/odpf/entropy/internal/server/v1/resources" - "github.com/odpf/entropy/pkg/version" + "github.com/goto/entropy/internal/server/serverutils" + modulesv1 "github.com/goto/entropy/internal/server/v1/modules" + resourcesv1 "github.com/goto/entropy/internal/server/v1/resources" + "github.com/goto/entropy/pkg/common" + "github.com/goto/entropy/pkg/version" + commonv1 "github.com/goto/entropy/proto/gotocompany/common/v1" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" + "github.com/goto/salt/mux" ) -const defaultGracePeriod = 5 * time.Second +const ( + gracePeriod = 5 * time.Second + readTimeout = 120 * time.Second + writeTimeout = 600 * time.Second + 
maxHeaderBytes = 1 << 20 +) // Serve initialises all the gRPC+HTTP API routes, starts listening for requests at addr, and blocks until server exits. // Server exits gracefully when context is cancelled. -func Serve(ctx context.Context, addr string, nrApp *newrelic.Application, logger *zap.Logger, +func Serve(ctx context.Context, httpAddr, grpcAddr string, nrApp *newrelic.Application, resourceSvc resourcesv1.ResourceService, moduleSvc modulesv1.ModuleService, ) error { grpcOpts := []grpc.ServerOption{ - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - grpc_recovery.UnaryServerInterceptor(), - grpc_ctxtags.UnaryServerInterceptor(), - grpc_zap.UnaryServerInterceptor(logger), + grpc.UnaryInterceptor(grpcmiddleware.ChainUnaryServer( + grpcrecovery.UnaryServerInterceptor(), + grpcctxtags.UnaryServerInterceptor(), + grpczap.UnaryServerInterceptor(zap.L()), nrgrpc.UnaryServerInterceptor(nrApp), )), grpc.StatsHandler(&ocgrpc.ServerHandler{}), } grpcServer := grpc.NewServer(grpcOpts...) - rpcHTTPGateway := runtime.NewServeMux(runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{ - MarshalOptions: protojson.MarshalOptions{ - UseProtoNames: true, - EmitUnpopulated: true, - }, - UnmarshalOptions: protojson.UnmarshalOptions{ - DiscardUnknown: true, - }, - })) + rpcHTTPGateway := runtime.NewServeMux( + runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{ + MarshalOptions: protojson.MarshalOptions{ + UseProtoNames: true, + EmitUnpopulated: true, + }, + UnmarshalOptions: protojson.UnmarshalOptions{ + DiscardUnknown: true, + }, + }), + runtime.WithMetadata(serverutils.ExtractRequestMetadata), + ) reflection.Register(grpcServer) @@ -65,7 +74,9 @@ func Serve(ctx context.Context, addr string, nrApp *newrelic.Application, logger return err } - resourceServiceRPC := resourcesv1.NewAPIServer(resourceSvc) + resourceServiceRPC := &resourcesv1.LogWrapper{ + ResourceServiceServer: resourcesv1.NewAPIServer(resourceSvc), + } grpcServer.RegisterService(&entropyv1beta1.ResourceService_ServiceDesc, resourceServiceRPC) if err := entropyv1beta1.RegisterResourceServiceHandlerServer(ctx, rpcHTTPGateway, resourceServiceRPC); err != nil { return err @@ -87,13 +98,21 @@ func Serve(ctx context.Context, addr string, nrApp *newrelic.Application, logger httpRouter.Use( requestID(), withOpenCensus(), - requestLogger(logger), // nolint + requestLogger(), ) - logger.Info("starting server", zap.String("addr", addr)) - return mux.Serve(ctx, addr, - mux.WithHTTP(httpRouter), - mux.WithGRPC(grpcServer), - mux.WithGracePeriod(defaultGracePeriod), + zap.L().Info("starting http & grpc servers", + zap.String("http_addr", httpAddr), + zap.String("grpc_addr", grpcAddr), + ) + return mux.Serve(ctx, + mux.WithHTTPTarget(httpAddr, &http.Server{ + Handler: httpRouter, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + MaxHeaderBytes: maxHeaderBytes, + }), + mux.WithGRPCTarget(grpcAddr, grpcServer), + mux.WithGracePeriod(gracePeriod), ) } diff --git a/internal/server/serverutils/context.go b/internal/server/serverutils/context.go new file mode 100644 index 00000000..101b19f4 --- /dev/null +++ b/internal/server/serverutils/context.go @@ -0,0 +1,38 @@ +package serverutils + +import ( + "context" + "net/http" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const userIDHeader = "user-id" + +func GetUserIdentifier(ctx context.Context) (string, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", 
status.Errorf(codes.DataLoss, "failed to get metadata") + } + + xrid := md[userIDHeader] + if len(xrid) == 0 { + return "", status.Errorf(codes.InvalidArgument, "missing '%s' header", userIDHeader) + } + + userID := strings.TrimSpace(xrid[0]) + if userID == "" { + return "", status.Errorf(codes.InvalidArgument, "empty '%s' header", userIDHeader) + } + + return userID, nil +} + +func ExtractRequestMetadata(_ context.Context, request *http.Request) metadata.MD { + header := request.Header.Get(userIDHeader) + md := metadata.Pairs(userIDHeader, header) + return md +} diff --git a/internal/server/serverutils/grpcerror.go b/internal/server/serverutils/grpcerror.go index 4eed22b6..334abe39 100644 --- a/internal/server/serverutils/grpcerror.go +++ b/internal/server/serverutils/grpcerror.go @@ -4,7 +4,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" ) // ToRPCError returns an instance of gRPC Error Status equivalent to the diff --git a/internal/server/v1/mocks/module_service.go b/internal/server/v1/mocks/module_service.go index cd41e869..7ecaabe8 100644 --- a/internal/server/v1/mocks/module_service.go +++ b/internal/server/v1/mocks/module_service.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. package mocks @@ -8,7 +8,7 @@ import ( mock "github.com/stretchr/testify/mock" - module "github.com/odpf/entropy/core/module" + module "github.com/goto/entropy/core/module" ) // ModuleService is an autogenerated mock type for the ModuleService type @@ -28,7 +28,15 @@ func (_m *ModuleService) EXPECT() *ModuleService_Expecter { func (_m *ModuleService) CreateModule(ctx context.Context, mod module.Module) (*module.Module, error) { ret := _m.Called(ctx, mod) + if len(ret) == 0 { + panic("no return value specified for CreateModule") + } + var r0 *module.Module + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, module.Module) (*module.Module, error)); ok { + return rf(ctx, mod) + } if rf, ok := ret.Get(0).(func(context.Context, module.Module) *module.Module); ok { r0 = rf(ctx, mod) } else { @@ -37,7 +45,6 @@ func (_m *ModuleService) CreateModule(ctx context.Context, mod module.Module) (* } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, module.Module) error); ok { r1 = rf(ctx, mod) } else { @@ -53,8 +60,8 @@ type ModuleService_CreateModule_Call struct { } // CreateModule is a helper method to define mock.On call -// - ctx context.Context -// - mod module.Module +// - ctx context.Context +// - mod module.Module func (_e *ModuleService_Expecter) CreateModule(ctx interface{}, mod interface{}) *ModuleService_CreateModule_Call { return &ModuleService_CreateModule_Call{Call: _e.mock.On("CreateModule", ctx, mod)} } @@ -71,10 +78,19 @@ func (_c *ModuleService_CreateModule_Call) Return(_a0 *module.Module, _a1 error) return _c } +func (_c *ModuleService_CreateModule_Call) RunAndReturn(run func(context.Context, module.Module) (*module.Module, error)) *ModuleService_CreateModule_Call { + _c.Call.Return(run) + return _c +} + // DeleteModule provides a mock function with given fields: ctx, urn func (_m *ModuleService) DeleteModule(ctx context.Context, urn string) error { ret := _m.Called(ctx, urn) + if len(ret) == 0 { + panic("no return value specified for DeleteModule") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok 
{ r0 = rf(ctx, urn) @@ -91,8 +107,8 @@ type ModuleService_DeleteModule_Call struct { } // DeleteModule is a helper method to define mock.On call -// - ctx context.Context -// - urn string +// - ctx context.Context +// - urn string func (_e *ModuleService_Expecter) DeleteModule(ctx interface{}, urn interface{}) *ModuleService_DeleteModule_Call { return &ModuleService_DeleteModule_Call{Call: _e.mock.On("DeleteModule", ctx, urn)} } @@ -109,11 +125,24 @@ func (_c *ModuleService_DeleteModule_Call) Return(_a0 error) *ModuleService_Dele return _c } +func (_c *ModuleService_DeleteModule_Call) RunAndReturn(run func(context.Context, string) error) *ModuleService_DeleteModule_Call { + _c.Call.Return(run) + return _c +} + // GetModule provides a mock function with given fields: ctx, urn func (_m *ModuleService) GetModule(ctx context.Context, urn string) (*module.Module, error) { ret := _m.Called(ctx, urn) + if len(ret) == 0 { + panic("no return value specified for GetModule") + } + var r0 *module.Module + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*module.Module, error)); ok { + return rf(ctx, urn) + } if rf, ok := ret.Get(0).(func(context.Context, string) *module.Module); ok { r0 = rf(ctx, urn) } else { @@ -122,7 +151,6 @@ func (_m *ModuleService) GetModule(ctx context.Context, urn string) (*module.Mod } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, urn) } else { @@ -138,8 +166,8 @@ type ModuleService_GetModule_Call struct { } // GetModule is a helper method to define mock.On call -// - ctx context.Context -// - urn string +// - ctx context.Context +// - urn string func (_e *ModuleService_Expecter) GetModule(ctx interface{}, urn interface{}) *ModuleService_GetModule_Call { return &ModuleService_GetModule_Call{Call: _e.mock.On("GetModule", ctx, urn)} } @@ -156,11 +184,24 @@ func (_c *ModuleService_GetModule_Call) Return(_a0 *module.Module, _a1 error) *M return _c } +func (_c *ModuleService_GetModule_Call) RunAndReturn(run func(context.Context, string) (*module.Module, error)) *ModuleService_GetModule_Call { + _c.Call.Return(run) + return _c +} + // ListModules provides a mock function with given fields: ctx, project func (_m *ModuleService) ListModules(ctx context.Context, project string) ([]module.Module, error) { ret := _m.Called(ctx, project) + if len(ret) == 0 { + panic("no return value specified for ListModules") + } + var r0 []module.Module + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]module.Module, error)); ok { + return rf(ctx, project) + } if rf, ok := ret.Get(0).(func(context.Context, string) []module.Module); ok { r0 = rf(ctx, project) } else { @@ -169,7 +210,6 @@ func (_m *ModuleService) ListModules(ctx context.Context, project string) ([]mod } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, project) } else { @@ -185,8 +225,8 @@ type ModuleService_ListModules_Call struct { } // ListModules is a helper method to define mock.On call -// - ctx context.Context -// - project string +// - ctx context.Context +// - project string func (_e *ModuleService_Expecter) ListModules(ctx interface{}, project interface{}) *ModuleService_ListModules_Call { return &ModuleService_ListModules_Call{Call: _e.mock.On("ListModules", ctx, project)} } @@ -203,11 +243,24 @@ func (_c *ModuleService_ListModules_Call) Return(_a0 []module.Module, _a1 error) return _c } +func (_c *ModuleService_ListModules_Call) RunAndReturn(run func(context.Context, string) 
([]module.Module, error)) *ModuleService_ListModules_Call { + _c.Call.Return(run) + return _c +} + // UpdateModule provides a mock function with given fields: ctx, urn, newConfigs func (_m *ModuleService) UpdateModule(ctx context.Context, urn string, newConfigs json.RawMessage) (*module.Module, error) { ret := _m.Called(ctx, urn, newConfigs) + if len(ret) == 0 { + panic("no return value specified for UpdateModule") + } + var r0 *module.Module + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, json.RawMessage) (*module.Module, error)); ok { + return rf(ctx, urn, newConfigs) + } if rf, ok := ret.Get(0).(func(context.Context, string, json.RawMessage) *module.Module); ok { r0 = rf(ctx, urn, newConfigs) } else { @@ -216,7 +269,6 @@ func (_m *ModuleService) UpdateModule(ctx context.Context, urn string, newConfig } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, json.RawMessage) error); ok { r1 = rf(ctx, urn, newConfigs) } else { @@ -232,9 +284,9 @@ type ModuleService_UpdateModule_Call struct { } // UpdateModule is a helper method to define mock.On call -// - ctx context.Context -// - urn string -// - newConfigs json.RawMessage +// - ctx context.Context +// - urn string +// - newConfigs json.RawMessage func (_e *ModuleService_Expecter) UpdateModule(ctx interface{}, urn interface{}, newConfigs interface{}) *ModuleService_UpdateModule_Call { return &ModuleService_UpdateModule_Call{Call: _e.mock.On("UpdateModule", ctx, urn, newConfigs)} } @@ -250,3 +302,22 @@ func (_c *ModuleService_UpdateModule_Call) Return(_a0 *module.Module, _a1 error) _c.Call.Return(_a0, _a1) return _c } + +func (_c *ModuleService_UpdateModule_Call) RunAndReturn(run func(context.Context, string, json.RawMessage) (*module.Module, error)) *ModuleService_UpdateModule_Call { + _c.Call.Return(run) + return _c +} + +// NewModuleService creates a new instance of ModuleService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewModuleService(t interface { + mock.TestingT + Cleanup(func()) +}) *ModuleService { + mock := &ModuleService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/server/v1/mocks/resource_service.go b/internal/server/v1/mocks/resource_service.go index b6ebfb14..404a1ef4 100644 --- a/internal/server/v1/mocks/resource_service.go +++ b/internal/server/v1/mocks/resource_service.go @@ -1,14 +1,16 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.43.2. DO NOT EDIT. 
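For orientation, the regenerated mocks add RunAndReturn helpers plus a NewModuleService constructor that registers cleanup and AssertExpectations on the test. A minimal usage sketch follows; the test name, package clause, and URN value are invented for illustration, and it assumes module.Module exposes a URN field — the mock API itself is the generated code above.

package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/goto/entropy/core/module"
	"github.com/goto/entropy/internal/server/v1/mocks"
)

func TestModuleServiceMock_sketch(t *testing.T) {
	// NewModuleService wires t.Cleanup + AssertExpectations, so an unmet
	// expectation fails the test without an explicit assert call.
	svc := mocks.NewModuleService(t)

	// RunAndReturn derives the stubbed result from the actual arguments,
	// instead of a fixed Return value.
	svc.EXPECT().
		GetModule(mock.Anything, mock.Anything).
		RunAndReturn(func(_ context.Context, urn string) (*module.Module, error) {
			return &module.Module{URN: urn}, nil
		})

	got, err := svc.GetModule(context.Background(), "orn:entropy:module:demo")
	require.NoError(t, err)
	require.Equal(t, "orn:entropy:module:demo", got.URN)
}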
package mocks import ( context "context" - module "github.com/odpf/entropy/core/module" + core "github.com/goto/entropy/core" mock "github.com/stretchr/testify/mock" - resource "github.com/odpf/entropy/core/resource" + module "github.com/goto/entropy/core/module" + + resource "github.com/goto/entropy/core/resource" ) // ResourceService is an autogenerated mock type for the ResourceService type @@ -24,22 +26,36 @@ func (_m *ResourceService) EXPECT() *ResourceService_Expecter { return &ResourceService_Expecter{mock: &_m.Mock} } -// ApplyAction provides a mock function with given fields: ctx, urn, action -func (_m *ResourceService) ApplyAction(ctx context.Context, urn string, action module.ActionRequest) (*resource.Resource, error) { - ret := _m.Called(ctx, urn, action) +// ApplyAction provides a mock function with given fields: ctx, urn, action, resourceOpts +func (_m *ResourceService) ApplyAction(ctx context.Context, urn string, action module.ActionRequest, resourceOpts ...core.Options) (*resource.Resource, error) { + _va := make([]interface{}, len(resourceOpts)) + for _i := range resourceOpts { + _va[_i] = resourceOpts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, urn, action) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ApplyAction") + } var r0 *resource.Resource - if rf, ok := ret.Get(0).(func(context.Context, string, module.ActionRequest) *resource.Resource); ok { - r0 = rf(ctx, urn, action) + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, module.ActionRequest, ...core.Options) (*resource.Resource, error)); ok { + return rf(ctx, urn, action, resourceOpts...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, module.ActionRequest, ...core.Options) *resource.Resource); ok { + r0 = rf(ctx, urn, action, resourceOpts...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*resource.Resource) } } - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, module.ActionRequest) error); ok { - r1 = rf(ctx, urn, action) + if rf, ok := ret.Get(1).(func(context.Context, string, module.ActionRequest, ...core.Options) error); ok { + r1 = rf(ctx, urn, action, resourceOpts...) 
} else { r1 = ret.Error(1) } @@ -53,16 +69,24 @@ type ResourceService_ApplyAction_Call struct { } // ApplyAction is a helper method to define mock.On call -// - ctx context.Context -// - urn string -// - action module.ActionRequest -func (_e *ResourceService_Expecter) ApplyAction(ctx interface{}, urn interface{}, action interface{}) *ResourceService_ApplyAction_Call { - return &ResourceService_ApplyAction_Call{Call: _e.mock.On("ApplyAction", ctx, urn, action)} +// - ctx context.Context +// - urn string +// - action module.ActionRequest +// - resourceOpts ...core.Options +func (_e *ResourceService_Expecter) ApplyAction(ctx interface{}, urn interface{}, action interface{}, resourceOpts ...interface{}) *ResourceService_ApplyAction_Call { + return &ResourceService_ApplyAction_Call{Call: _e.mock.On("ApplyAction", + append([]interface{}{ctx, urn, action}, resourceOpts...)...)} } -func (_c *ResourceService_ApplyAction_Call) Run(run func(ctx context.Context, urn string, action module.ActionRequest)) *ResourceService_ApplyAction_Call { +func (_c *ResourceService_ApplyAction_Call) Run(run func(ctx context.Context, urn string, action module.ActionRequest, resourceOpts ...core.Options)) *ResourceService_ApplyAction_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(module.ActionRequest)) + variadicArgs := make([]core.Options, len(args)-3) + for i, a := range args[3:] { + if a != nil { + variadicArgs[i] = a.(core.Options) + } + } + run(args[0].(context.Context), args[1].(string), args[2].(module.ActionRequest), variadicArgs...) }) return _c } @@ -72,22 +96,41 @@ func (_c *ResourceService_ApplyAction_Call) Return(_a0 *resource.Resource, _a1 e return _c } -// CreateResource provides a mock function with given fields: ctx, res -func (_m *ResourceService) CreateResource(ctx context.Context, res resource.Resource) (*resource.Resource, error) { - ret := _m.Called(ctx, res) +func (_c *ResourceService_ApplyAction_Call) RunAndReturn(run func(context.Context, string, module.ActionRequest, ...core.Options) (*resource.Resource, error)) *ResourceService_ApplyAction_Call { + _c.Call.Return(run) + return _c +} + +// CreateResource provides a mock function with given fields: ctx, res, resourceOpts +func (_m *ResourceService) CreateResource(ctx context.Context, res resource.Resource, resourceOpts ...core.Options) (*resource.Resource, error) { + _va := make([]interface{}, len(resourceOpts)) + for _i := range resourceOpts { + _va[_i] = resourceOpts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, res) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateResource") + } var r0 *resource.Resource - if rf, ok := ret.Get(0).(func(context.Context, resource.Resource) *resource.Resource); ok { - r0 = rf(ctx, res) + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, resource.Resource, ...core.Options) (*resource.Resource, error)); ok { + return rf(ctx, res, resourceOpts...) + } + if rf, ok := ret.Get(0).(func(context.Context, resource.Resource, ...core.Options) *resource.Resource); ok { + r0 = rf(ctx, res, resourceOpts...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*resource.Resource) } } - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, resource.Resource) error); ok { - r1 = rf(ctx, res) + if rf, ok := ret.Get(1).(func(context.Context, resource.Resource, ...core.Options) error); ok { + r1 = rf(ctx, res, resourceOpts...) 
} else { r1 = ret.Error(1) } @@ -101,15 +144,23 @@ type ResourceService_CreateResource_Call struct { } // CreateResource is a helper method to define mock.On call -// - ctx context.Context -// - res resource.Resource -func (_e *ResourceService_Expecter) CreateResource(ctx interface{}, res interface{}) *ResourceService_CreateResource_Call { - return &ResourceService_CreateResource_Call{Call: _e.mock.On("CreateResource", ctx, res)} +// - ctx context.Context +// - res resource.Resource +// - resourceOpts ...core.Options +func (_e *ResourceService_Expecter) CreateResource(ctx interface{}, res interface{}, resourceOpts ...interface{}) *ResourceService_CreateResource_Call { + return &ResourceService_CreateResource_Call{Call: _e.mock.On("CreateResource", + append([]interface{}{ctx, res}, resourceOpts...)...)} } -func (_c *ResourceService_CreateResource_Call) Run(run func(ctx context.Context, res resource.Resource)) *ResourceService_CreateResource_Call { +func (_c *ResourceService_CreateResource_Call) Run(run func(ctx context.Context, res resource.Resource, resourceOpts ...core.Options)) *ResourceService_CreateResource_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(resource.Resource)) + variadicArgs := make([]core.Options, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(core.Options) + } + } + run(args[0].(context.Context), args[1].(resource.Resource), variadicArgs...) }) return _c } @@ -119,10 +170,19 @@ func (_c *ResourceService_CreateResource_Call) Return(_a0 *resource.Resource, _a return _c } +func (_c *ResourceService_CreateResource_Call) RunAndReturn(run func(context.Context, resource.Resource, ...core.Options) (*resource.Resource, error)) *ResourceService_CreateResource_Call { + _c.Call.Return(run) + return _c +} + // DeleteResource provides a mock function with given fields: ctx, urn func (_m *ResourceService) DeleteResource(ctx context.Context, urn string) error { ret := _m.Called(ctx, urn) + if len(ret) == 0 { + panic("no return value specified for DeleteResource") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, urn) @@ -139,8 +199,8 @@ type ResourceService_DeleteResource_Call struct { } // DeleteResource is a helper method to define mock.On call -// - ctx context.Context -// - urn string +// - ctx context.Context +// - urn string func (_e *ResourceService_Expecter) DeleteResource(ctx interface{}, urn interface{}) *ResourceService_DeleteResource_Call { return &ResourceService_DeleteResource_Call{Call: _e.mock.On("DeleteResource", ctx, urn)} } @@ -157,11 +217,24 @@ func (_c *ResourceService_DeleteResource_Call) Return(_a0 error) *ResourceServic return _c } +func (_c *ResourceService_DeleteResource_Call) RunAndReturn(run func(context.Context, string) error) *ResourceService_DeleteResource_Call { + _c.Call.Return(run) + return _c +} + // GetLog provides a mock function with given fields: ctx, urn, filter func (_m *ResourceService) GetLog(ctx context.Context, urn string, filter map[string]string) (<-chan module.LogChunk, error) { ret := _m.Called(ctx, urn, filter) + if len(ret) == 0 { + panic("no return value specified for GetLog") + } + var r0 <-chan module.LogChunk + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, map[string]string) (<-chan module.LogChunk, error)); ok { + return rf(ctx, urn, filter) + } if rf, ok := ret.Get(0).(func(context.Context, string, map[string]string) <-chan module.LogChunk); ok { r0 = rf(ctx, urn, filter) } else { 
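Because the regenerated ResourceService mock forwards the variadic core.Options through _m.Called, an expectation now has to account for the option values as well; the updated server tests further below match core.WithDryRun(false) explicitly. A minimal sketch in the same spirit — the test name, package clause, and URN string are illustrative:

package resources_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/goto/entropy/core"
	"github.com/goto/entropy/core/resource"
	"github.com/goto/entropy/internal/server/v1/mocks"
)

func TestResourceServiceMock_dryRunOption_sketch(t *testing.T) {
	svc := mocks.NewResourceService(t)

	// The dry-run option is part of the matched argument list, so the
	// expectation names it, mirroring the updated server tests.
	svc.EXPECT().
		CreateResource(mock.Anything, mock.Anything, core.WithDryRun(false)).
		Return(&resource.Resource{URN: "p-testdata-gl-testname-log"}, nil).
		Once()

	res, err := svc.CreateResource(context.Background(), resource.Resource{}, core.WithDryRun(false))
	require.NoError(t, err)
	require.Equal(t, "p-testdata-gl-testname-log", res.URN)
}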
@@ -170,7 +243,6 @@ func (_m *ResourceService) GetLog(ctx context.Context, urn string, filter map[st } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, map[string]string) error); ok { r1 = rf(ctx, urn, filter) } else { @@ -186,9 +258,9 @@ type ResourceService_GetLog_Call struct { } // GetLog is a helper method to define mock.On call -// - ctx context.Context -// - urn string -// - filter map[string]string +// - ctx context.Context +// - urn string +// - filter map[string]string func (_e *ResourceService_Expecter) GetLog(ctx interface{}, urn interface{}, filter interface{}) *ResourceService_GetLog_Call { return &ResourceService_GetLog_Call{Call: _e.mock.On("GetLog", ctx, urn, filter)} } @@ -205,11 +277,24 @@ func (_c *ResourceService_GetLog_Call) Return(_a0 <-chan module.LogChunk, _a1 er return _c } +func (_c *ResourceService_GetLog_Call) RunAndReturn(run func(context.Context, string, map[string]string) (<-chan module.LogChunk, error)) *ResourceService_GetLog_Call { + _c.Call.Return(run) + return _c +} + // GetResource provides a mock function with given fields: ctx, urn func (_m *ResourceService) GetResource(ctx context.Context, urn string) (*resource.Resource, error) { ret := _m.Called(ctx, urn) + if len(ret) == 0 { + panic("no return value specified for GetResource") + } + var r0 *resource.Resource + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*resource.Resource, error)); ok { + return rf(ctx, urn) + } if rf, ok := ret.Get(0).(func(context.Context, string) *resource.Resource); ok { r0 = rf(ctx, urn) } else { @@ -218,7 +303,6 @@ func (_m *ResourceService) GetResource(ctx context.Context, urn string) (*resour } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { r1 = rf(ctx, urn) } else { @@ -234,8 +318,8 @@ type ResourceService_GetResource_Call struct { } // GetResource is a helper method to define mock.On call -// - ctx context.Context -// - urn string +// - ctx context.Context +// - urn string func (_e *ResourceService_Expecter) GetResource(ctx interface{}, urn interface{}) *ResourceService_GetResource_Call { return &ResourceService_GetResource_Call{Call: _e.mock.On("GetResource", ctx, urn)} } @@ -252,11 +336,24 @@ func (_c *ResourceService_GetResource_Call) Return(_a0 *resource.Resource, _a1 e return _c } +func (_c *ResourceService_GetResource_Call) RunAndReturn(run func(context.Context, string) (*resource.Resource, error)) *ResourceService_GetResource_Call { + _c.Call.Return(run) + return _c +} + // GetRevisions provides a mock function with given fields: ctx, selector func (_m *ResourceService) GetRevisions(ctx context.Context, selector resource.RevisionsSelector) ([]resource.Revision, error) { ret := _m.Called(ctx, selector) + if len(ret) == 0 { + panic("no return value specified for GetRevisions") + } + var r0 []resource.Revision + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, resource.RevisionsSelector) ([]resource.Revision, error)); ok { + return rf(ctx, selector) + } if rf, ok := ret.Get(0).(func(context.Context, resource.RevisionsSelector) []resource.Revision); ok { r0 = rf(ctx, selector) } else { @@ -265,7 +362,6 @@ func (_m *ResourceService) GetRevisions(ctx context.Context, selector resource.R } } - var r1 error if rf, ok := ret.Get(1).(func(context.Context, resource.RevisionsSelector) error); ok { r1 = rf(ctx, selector) } else { @@ -281,8 +377,8 @@ type ResourceService_GetRevisions_Call struct { } // GetRevisions is a helper method to define mock.On call -// - ctx 
context.Context -// - selector resource.RevisionsSelector +// - ctx context.Context +// - selector resource.RevisionsSelector func (_e *ResourceService_Expecter) GetRevisions(ctx interface{}, selector interface{}) *ResourceService_GetRevisions_Call { return &ResourceService_GetRevisions_Call{Call: _e.mock.On("GetRevisions", ctx, selector)} } @@ -299,22 +395,32 @@ func (_c *ResourceService_GetRevisions_Call) Return(_a0 []resource.Revision, _a1 return _c } -// ListResources provides a mock function with given fields: ctx, filter -func (_m *ResourceService) ListResources(ctx context.Context, filter resource.Filter) ([]resource.Resource, error) { - ret := _m.Called(ctx, filter) +func (_c *ResourceService_GetRevisions_Call) RunAndReturn(run func(context.Context, resource.RevisionsSelector) ([]resource.Revision, error)) *ResourceService_GetRevisions_Call { + _c.Call.Return(run) + return _c +} - var r0 []resource.Resource - if rf, ok := ret.Get(0).(func(context.Context, resource.Filter) []resource.Resource); ok { - r0 = rf(ctx, filter) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]resource.Resource) - } +// ListResources provides a mock function with given fields: ctx, filter, withSpecConfigs +func (_m *ResourceService) ListResources(ctx context.Context, filter resource.Filter, withSpecConfigs bool) (resource.PagedResource, error) { + ret := _m.Called(ctx, filter, withSpecConfigs) + + if len(ret) == 0 { + panic("no return value specified for ListResources") } + var r0 resource.PagedResource var r1 error - if rf, ok := ret.Get(1).(func(context.Context, resource.Filter) error); ok { - r1 = rf(ctx, filter) + if rf, ok := ret.Get(0).(func(context.Context, resource.Filter, bool) (resource.PagedResource, error)); ok { + return rf(ctx, filter, withSpecConfigs) + } + if rf, ok := ret.Get(0).(func(context.Context, resource.Filter, bool) resource.PagedResource); ok { + r0 = rf(ctx, filter, withSpecConfigs) + } else { + r0 = ret.Get(0).(resource.PagedResource) + } + + if rf, ok := ret.Get(1).(func(context.Context, resource.Filter, bool) error); ok { + r1 = rf(ctx, filter, withSpecConfigs) } else { r1 = ret.Error(1) } @@ -328,40 +434,60 @@ type ResourceService_ListResources_Call struct { } // ListResources is a helper method to define mock.On call -// - ctx context.Context -// - filter resource.Filter -func (_e *ResourceService_Expecter) ListResources(ctx interface{}, filter interface{}) *ResourceService_ListResources_Call { - return &ResourceService_ListResources_Call{Call: _e.mock.On("ListResources", ctx, filter)} +// - ctx context.Context +// - filter resource.Filter +// - withSpecConfigs bool +func (_e *ResourceService_Expecter) ListResources(ctx interface{}, filter interface{}, withSpecConfigs interface{}) *ResourceService_ListResources_Call { + return &ResourceService_ListResources_Call{Call: _e.mock.On("ListResources", ctx, filter, withSpecConfigs)} } -func (_c *ResourceService_ListResources_Call) Run(run func(ctx context.Context, filter resource.Filter)) *ResourceService_ListResources_Call { +func (_c *ResourceService_ListResources_Call) Run(run func(ctx context.Context, filter resource.Filter, withSpecConfigs bool)) *ResourceService_ListResources_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(resource.Filter)) + run(args[0].(context.Context), args[1].(resource.Filter), args[2].(bool)) }) return _c } -func (_c *ResourceService_ListResources_Call) Return(_a0 []resource.Resource, _a1 error) *ResourceService_ListResources_Call { +func (_c 
*ResourceService_ListResources_Call) Return(_a0 resource.PagedResource, _a1 error) *ResourceService_ListResources_Call { _c.Call.Return(_a0, _a1) return _c } -// UpdateResource provides a mock function with given fields: ctx, urn, req -func (_m *ResourceService) UpdateResource(ctx context.Context, urn string, req resource.UpdateRequest) (*resource.Resource, error) { - ret := _m.Called(ctx, urn, req) +func (_c *ResourceService_ListResources_Call) RunAndReturn(run func(context.Context, resource.Filter, bool) (resource.PagedResource, error)) *ResourceService_ListResources_Call { + _c.Call.Return(run) + return _c +} + +// UpdateResource provides a mock function with given fields: ctx, urn, req, resourceOpts +func (_m *ResourceService) UpdateResource(ctx context.Context, urn string, req resource.UpdateRequest, resourceOpts ...core.Options) (*resource.Resource, error) { + _va := make([]interface{}, len(resourceOpts)) + for _i := range resourceOpts { + _va[_i] = resourceOpts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, urn, req) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for UpdateResource") + } var r0 *resource.Resource - if rf, ok := ret.Get(0).(func(context.Context, string, resource.UpdateRequest) *resource.Resource); ok { - r0 = rf(ctx, urn, req) + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, resource.UpdateRequest, ...core.Options) (*resource.Resource, error)); ok { + return rf(ctx, urn, req, resourceOpts...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, resource.UpdateRequest, ...core.Options) *resource.Resource); ok { + r0 = rf(ctx, urn, req, resourceOpts...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*resource.Resource) } } - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, resource.UpdateRequest) error); ok { - r1 = rf(ctx, urn, req) + if rf, ok := ret.Get(1).(func(context.Context, string, resource.UpdateRequest, ...core.Options) error); ok { + r1 = rf(ctx, urn, req, resourceOpts...) 
} else { r1 = ret.Error(1) } @@ -375,16 +501,24 @@ type ResourceService_UpdateResource_Call struct { } // UpdateResource is a helper method to define mock.On call -// - ctx context.Context -// - urn string -// - req resource.UpdateRequest -func (_e *ResourceService_Expecter) UpdateResource(ctx interface{}, urn interface{}, req interface{}) *ResourceService_UpdateResource_Call { - return &ResourceService_UpdateResource_Call{Call: _e.mock.On("UpdateResource", ctx, urn, req)} +// - ctx context.Context +// - urn string +// - req resource.UpdateRequest +// - resourceOpts ...core.Options +func (_e *ResourceService_Expecter) UpdateResource(ctx interface{}, urn interface{}, req interface{}, resourceOpts ...interface{}) *ResourceService_UpdateResource_Call { + return &ResourceService_UpdateResource_Call{Call: _e.mock.On("UpdateResource", + append([]interface{}{ctx, urn, req}, resourceOpts...)...)} } -func (_c *ResourceService_UpdateResource_Call) Run(run func(ctx context.Context, urn string, req resource.UpdateRequest)) *ResourceService_UpdateResource_Call { +func (_c *ResourceService_UpdateResource_Call) Run(run func(ctx context.Context, urn string, req resource.UpdateRequest, resourceOpts ...core.Options)) *ResourceService_UpdateResource_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(resource.UpdateRequest)) + variadicArgs := make([]core.Options, len(args)-3) + for i, a := range args[3:] { + if a != nil { + variadicArgs[i] = a.(core.Options) + } + } + run(args[0].(context.Context), args[1].(string), args[2].(resource.UpdateRequest), variadicArgs...) }) return _c } @@ -393,3 +527,22 @@ func (_c *ResourceService_UpdateResource_Call) Return(_a0 *resource.Resource, _a _c.Call.Return(_a0, _a1) return _c } + +func (_c *ResourceService_UpdateResource_Call) RunAndReturn(run func(context.Context, string, resource.UpdateRequest, ...core.Options) (*resource.Resource, error)) *ResourceService_UpdateResource_Call { + _c.Call.Return(run) + return _c +} + +// NewResourceService creates a new instance of ResourceService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewResourceService(t interface { + mock.TestingT + Cleanup(func()) +}) *ResourceService { + mock := &ResourceService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/server/v1/modules/mappers.go b/internal/server/v1/modules/mappers.go index 5816959b..3ace9b1f 100644 --- a/internal/server/v1/modules/mappers.go +++ b/internal/server/v1/modules/mappers.go @@ -3,12 +3,12 @@ package modules import ( "encoding/json" - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" "google.golang.org/protobuf/types/known/structpb" "google.golang.org/protobuf/types/known/timestamppb" - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/pkg/errors" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" ) func moduleToProto(mod module.Module) (*entropyv1beta1.Module, error) { diff --git a/internal/server/v1/modules/server.go b/internal/server/v1/modules/server.go index 7704aa45..7ba1612c 100644 --- a/internal/server/v1/modules/server.go +++ b/internal/server/v1/modules/server.go @@ -6,10 +6,9 @@ import ( "context" "encoding/json" - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/internal/server/serverutils" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/internal/server/serverutils" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" ) type ModuleService interface { diff --git a/internal/server/v1/modules/server_test.go b/internal/server/v1/modules/server_test.go index 40b571b7..4b4a9d13 100644 --- a/internal/server/v1/modules/server_test.go +++ b/internal/server/v1/modules/server_test.go @@ -6,10 +6,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" "google.golang.org/protobuf/testing/protocmp" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" ) func TestAPIServer_ListModules(t *testing.T) { diff --git a/internal/server/v1/resources/logwrapper.go b/internal/server/v1/resources/logwrapper.go new file mode 100644 index 00000000..9334615c --- /dev/null +++ b/internal/server/v1/resources/logwrapper.go @@ -0,0 +1,85 @@ +package resources + +import ( + "context" + + "go.uber.org/zap" + + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" +) + +type LogWrapper struct { + entropyv1beta1.ResourceServiceServer +} + +func (lw *LogWrapper) ListResources(ctx context.Context, request *entropyv1beta1.ListResourcesRequest) (*entropyv1beta1.ListResourcesResponse, error) { + resp, err := lw.ResourceServiceServer.ListResources(ctx, request) + if err != nil { + zap.L().Error("ListResources() failed", zap.Error(err)) + return nil, err + } + return resp, nil +} + +func (lw *LogWrapper) GetResource(ctx context.Context, request *entropyv1beta1.GetResourceRequest) (*entropyv1beta1.GetResourceResponse, error) { + resp, err := lw.ResourceServiceServer.GetResource(ctx, request) + if err != nil { + zap.L().Error("GetResource() failed", 
zap.Error(err)) + return nil, err + } + return resp, nil +} + +func (lw *LogWrapper) CreateResource(ctx context.Context, request *entropyv1beta1.CreateResourceRequest) (*entropyv1beta1.CreateResourceResponse, error) { + resp, err := lw.ResourceServiceServer.CreateResource(ctx, request) + if err != nil { + zap.L().Error("CreateResource() failed", zap.Error(err)) + return nil, err + } + return resp, nil +} + +func (lw *LogWrapper) UpdateResource(ctx context.Context, request *entropyv1beta1.UpdateResourceRequest) (*entropyv1beta1.UpdateResourceResponse, error) { + resp, err := lw.ResourceServiceServer.UpdateResource(ctx, request) + if err != nil { + zap.L().Error("UpdateResource() failed", zap.Error(err)) + return nil, err + } + return resp, nil +} + +func (lw *LogWrapper) DeleteResource(ctx context.Context, request *entropyv1beta1.DeleteResourceRequest) (*entropyv1beta1.DeleteResourceResponse, error) { + resp, err := lw.ResourceServiceServer.DeleteResource(ctx, request) + if err != nil { + zap.L().Error("DeleteResource() failed", zap.Error(err)) + return nil, err + } + return resp, nil +} + +func (lw *LogWrapper) ApplyAction(ctx context.Context, request *entropyv1beta1.ApplyActionRequest) (*entropyv1beta1.ApplyActionResponse, error) { + resp, err := lw.ResourceServiceServer.ApplyAction(ctx, request) + if err != nil { + zap.L().Error("ApplyAction() failed", zap.Error(err)) + return nil, err + } + return resp, nil +} + +func (lw *LogWrapper) GetLog(request *entropyv1beta1.GetLogRequest, server entropyv1beta1.ResourceService_GetLogServer) error { + err := lw.ResourceServiceServer.GetLog(request, server) + if err != nil { + zap.L().Error("GetLog() failed", zap.Error(err)) + return err + } + return nil +} + +func (lw *LogWrapper) GetResourceRevisions(ctx context.Context, request *entropyv1beta1.GetResourceRevisionsRequest) (*entropyv1beta1.GetResourceRevisionsResponse, error) { + resp, err := lw.ResourceServiceServer.GetResourceRevisions(ctx, request) + if err != nil { + zap.L().Error("GetResourceRevisions() failed", zap.Error(err)) + return nil, err + } + return resp, nil +} diff --git a/internal/server/v1/resources/mappers.go b/internal/server/v1/resources/mappers.go index 8bbcccd9..3db14586 100644 --- a/internal/server/v1/resources/mappers.go +++ b/internal/server/v1/resources/mappers.go @@ -4,12 +4,12 @@ import ( "encoding/json" "strconv" - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" "google.golang.org/protobuf/types/known/structpb" "google.golang.org/protobuf/types/known/timestamppb" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" ) const decimalBase = 10 @@ -17,12 +17,12 @@ const decimalBase = 10 func resourceToProto(res resource.Resource) (*entropyv1beta1.Resource, error) { protoState, err := resourceStateToProto(res.State) if err != nil { - return nil, err + return nil, errors.ErrInternal.WithMsgf("state to protobuf failed").WithCausef(err.Error()) } spec, err := resourceSpecToProto(res.Spec) if err != nil { - return nil, err + return nil, errors.ErrInternal.WithMsgf("spec to protobuf failed").WithCausef(err.Error()) } return &entropyv1beta1.Resource{ @@ -35,6 +35,8 @@ func resourceToProto(res resource.Resource) (*entropyv1beta1.Resource, error) { UpdatedAt: timestamppb.New(res.UpdatedAt), Spec: spec, 
State: protoState, + CreatedBy: res.CreatedBy, + UpdatedBy: res.UpdatedBy, }, nil } @@ -43,7 +45,7 @@ func resourceStateToProto(state resource.State) (*entropyv1beta1.ResourceState, if len(state.Output) > 0 { outputVal = &structpb.Value{} if err := json.Unmarshal(state.Output, outputVal); err != nil { - return nil, err + return nil, errors.ErrInternal.WithMsgf("failed to unmarshal output").WithCausef(err.Error()) } } @@ -52,17 +54,28 @@ func resourceStateToProto(state resource.State) (*entropyv1beta1.ResourceState, protoStatus = entropyv1beta1.ResourceState_Status(resourceStatus) } + var nextSyncAt *timestamppb.Timestamp + if state.NextSyncAt != nil { + nextSyncAt = timestamppb.New(*state.NextSyncAt) + } + return &entropyv1beta1.ResourceState{ - Status: protoStatus, - Output: outputVal, - ModuleData: state.ModuleData, + Status: protoStatus, + Output: outputVal, + ModuleData: state.ModuleData, + LogOptions: nil, + SyncRetries: int32(state.SyncResult.Retries), + SyncLastError: state.SyncResult.LastError, + NextSyncAt: nextSyncAt, }, nil } func resourceSpecToProto(spec resource.Spec) (*entropyv1beta1.ResourceSpec, error) { - conf := structpb.Value{} - if err := json.Unmarshal(spec.Configs, &conf); err != nil { - return nil, err + conf := structpb.NewNullValue() + if spec.Configs != nil { + if err := json.Unmarshal(spec.Configs, &conf); err != nil { + return nil, errors.ErrInternal.WithMsgf("json.Unmarshal failed for spec.configs").WithCausef(err.Error()) + } } var deps []*entropyv1beta1.ResourceDependency @@ -74,23 +87,18 @@ func resourceSpecToProto(spec resource.Spec) (*entropyv1beta1.ResourceSpec, erro } return &entropyv1beta1.ResourceSpec{ - Configs: &conf, + Configs: conf, Dependencies: deps, }, nil } -func resourceFromProto(res *entropyv1beta1.Resource) (*resource.Resource, error) { +func resourceFromProto(res *entropyv1beta1.Resource, includeState bool) (*resource.Resource, error) { spec, err := resourceSpecFromProto(res.Spec) if err != nil { return nil, err } - jsonData, err := res.State.GetOutput().GetStructValue().MarshalJSON() - if err != nil { - return nil, err - } - - return &resource.Resource{ + mappedRes := &resource.Resource{ URN: res.GetUrn(), Kind: res.GetKind(), Name: res.GetName(), @@ -99,12 +107,22 @@ func resourceFromProto(res *entropyv1beta1.Resource) (*resource.Resource, error) CreatedAt: res.GetCreatedAt().AsTime(), UpdatedAt: res.GetUpdatedAt().AsTime(), Spec: *spec, - State: resource.State{ + } + + if includeState { + jsonData, err := res.State.GetOutput().GetStructValue().MarshalJSON() + if err != nil { + return nil, errors.ErrInvalid.WithMsgf("state.output is not valid json").WithCausef(err.Error()) + } + + mappedRes.State = resource.State{ Status: res.State.GetStatus().String(), Output: jsonData, ModuleData: res.State.GetModuleData(), - }, - }, nil + } + } + + return mappedRes, nil } func resourceSpecFromProto(spec *entropyv1beta1.ResourceSpec) (*resource.Spec, error) { @@ -120,7 +138,7 @@ func resourceSpecFromProto(spec *entropyv1beta1.ResourceSpec) (*resource.Spec, e confJSON, err := spec.GetConfigs().MarshalJSON() if err != nil { - return nil, err + return nil, errors.ErrInvalid.WithMsgf("configs is not valid JSON").WithCausef(err.Error()) } return &resource.Spec{ @@ -141,6 +159,7 @@ func revisionToProto(revision resource.Revision) (*entropyv1beta1.ResourceRevisi Reason: revision.Reason, Labels: revision.Labels, CreatedAt: timestamppb.New(revision.CreatedAt), + CreatedBy: revision.CreatedBy, Spec: spec, }, nil } diff --git 
a/internal/server/v1/resources/server.go b/internal/server/v1/resources/server.go index 5fbe0452..35a66efb 100644 --- a/internal/server/v1/resources/server.go +++ b/internal/server/v1/resources/server.go @@ -5,21 +5,21 @@ package resources import ( "context" - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/internal/server/serverutils" + "github.com/goto/entropy/core" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/internal/server/serverutils" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" ) type ResourceService interface { GetResource(ctx context.Context, urn string) (*resource.Resource, error) - ListResources(ctx context.Context, filter resource.Filter) ([]resource.Resource, error) - CreateResource(ctx context.Context, res resource.Resource) (*resource.Resource, error) - UpdateResource(ctx context.Context, urn string, req resource.UpdateRequest) (*resource.Resource, error) + ListResources(ctx context.Context, filter resource.Filter, withSpecConfigs bool) (resource.PagedResource, error) + CreateResource(ctx context.Context, res resource.Resource, resourceOpts ...core.Options) (*resource.Resource, error) + UpdateResource(ctx context.Context, urn string, req resource.UpdateRequest, resourceOpts ...core.Options) (*resource.Resource, error) DeleteResource(ctx context.Context, urn string) error - ApplyAction(ctx context.Context, urn string, action module.ActionRequest) (*resource.Resource, error) + ApplyAction(ctx context.Context, urn string, action module.ActionRequest, resourceOpts ...core.Options) (*resource.Resource, error) GetLog(ctx context.Context, urn string, filter map[string]string) (<-chan module.LogChunk, error) GetRevisions(ctx context.Context, selector resource.RevisionsSelector) ([]resource.Revision, error) @@ -27,23 +27,29 @@ type ResourceService interface { type APIServer struct { entropyv1beta1.UnimplementedResourceServiceServer - - resourceService ResourceService + resourceSvc ResourceService } func NewAPIServer(resourceService ResourceService) *APIServer { return &APIServer{ - resourceService: resourceService, + resourceSvc: resourceService, } } func (server APIServer) CreateResource(ctx context.Context, request *entropyv1beta1.CreateResourceRequest) (*entropyv1beta1.CreateResourceResponse, error) { - res, err := resourceFromProto(request.Resource) + res, err := resourceFromProto(request.Resource, false) + if err != nil { + return nil, serverutils.ToRPCError(err) + } + + userIdentifier, err := serverutils.GetUserIdentifier(ctx) if err != nil { return nil, serverutils.ToRPCError(err) } + res.CreatedBy = userIdentifier + res.UpdatedBy = userIdentifier - result, err := server.resourceService.CreateResource(ctx, *res) + result, err := server.resourceSvc.CreateResource(ctx, *res, core.WithDryRun(request.GetDryRun())) if err != nil { return nil, serverutils.ToRPCError(err) } @@ -64,12 +70,18 @@ func (server APIServer) UpdateResource(ctx context.Context, request *entropyv1be return nil, serverutils.ToRPCError(err) } + userIdentifier, err := serverutils.GetUserIdentifier(ctx) + if err != nil { + return nil, serverutils.ToRPCError(err) + } + updateRequest := resource.UpdateRequest{ Spec: *newSpec, Labels: request.Labels, + UserID: userIdentifier, } - 
res, err := server.resourceService.UpdateResource(ctx, request.GetUrn(), updateRequest) + res, err := server.resourceSvc.UpdateResource(ctx, request.GetUrn(), updateRequest, core.WithDryRun(request.GetDryRun())) if err != nil { return nil, serverutils.ToRPCError(err) } @@ -85,7 +97,7 @@ func (server APIServer) UpdateResource(ctx context.Context, request *entropyv1be } func (server APIServer) GetResource(ctx context.Context, request *entropyv1beta1.GetResourceRequest) (*entropyv1beta1.GetResourceResponse, error) { - res, err := server.resourceService.GetResource(ctx, request.GetUrn()) + res, err := server.resourceSvc.GetResource(ctx, request.GetUrn()) if err != nil { return nil, serverutils.ToRPCError(err) } @@ -102,18 +114,22 @@ func (server APIServer) GetResource(ctx context.Context, request *entropyv1beta1 func (server APIServer) ListResources(ctx context.Context, request *entropyv1beta1.ListResourcesRequest) (*entropyv1beta1.ListResourcesResponse, error) { filter := resource.Filter{ - Kind: request.GetKind(), - Project: request.GetProject(), - Labels: nil, + Kind: request.GetKind(), + Project: request.GetProject(), + Labels: request.Labels, + PageSize: request.PageSize, + PageNum: request.PageNum, } - resources, err := server.resourceService.ListResources(ctx, filter) + withSpecConfigs := request.GetWithSpecConfigs() + + resources, err := server.resourceSvc.ListResources(ctx, filter, withSpecConfigs) if err != nil { return nil, serverutils.ToRPCError(err) } var responseResources []*entropyv1beta1.Resource - for _, res := range resources { + for _, res := range resources.Resources { responseResource, err := resourceToProto(res) if err != nil { return nil, serverutils.ToRPCError(err) @@ -122,12 +138,13 @@ func (server APIServer) ListResources(ctx context.Context, request *entropyv1bet } return &entropyv1beta1.ListResourcesResponse{ + Count: int32(len(responseResources)), Resources: responseResources, }, nil } func (server APIServer) DeleteResource(ctx context.Context, request *entropyv1beta1.DeleteResourceRequest) (*entropyv1beta1.DeleteResourceResponse, error) { - err := server.resourceService.DeleteResource(ctx, request.GetUrn()) + err := server.resourceSvc.DeleteResource(ctx, request.GetUrn()) if err != nil { return nil, serverutils.ToRPCError(err) } @@ -141,13 +158,19 @@ func (server APIServer) ApplyAction(ctx context.Context, request *entropyv1beta1 return nil, err } + userIdentifier, err := serverutils.GetUserIdentifier(ctx) + if err != nil { + return nil, serverutils.ToRPCError(err) + } + action := module.ActionRequest{ Name: request.GetAction(), Params: paramsJSON, Labels: request.Labels, + UserID: userIdentifier, } - updatedRes, err := server.resourceService.ApplyAction(ctx, request.GetUrn(), action) + updatedRes, err := server.resourceSvc.ApplyAction(ctx, request.GetUrn(), action, core.WithDryRun(request.GetDryRun())) if err != nil { return nil, serverutils.ToRPCError(err) } @@ -165,7 +188,7 @@ func (server APIServer) ApplyAction(ctx context.Context, request *entropyv1beta1 func (server APIServer) GetLog(request *entropyv1beta1.GetLogRequest, stream entropyv1beta1.ResourceService_GetLogServer) error { ctx := stream.Context() - logStream, err := server.resourceService.GetLog(ctx, request.GetUrn(), request.GetFilter()) + logStream, err := server.resourceSvc.GetLog(ctx, request.GetUrn(), request.GetFilter()) if err != nil { return serverutils.ToRPCError(err) } @@ -195,7 +218,7 @@ func (server APIServer) GetLog(request *entropyv1beta1.GetLogRequest, stream ent } func (server 
APIServer) GetResourceRevisions(ctx context.Context, request *entropyv1beta1.GetResourceRevisionsRequest) (*entropyv1beta1.GetResourceRevisionsResponse, error) { - revisions, err := server.resourceService.GetRevisions(ctx, resource.RevisionsSelector{URN: request.GetUrn()}) + revisions, err := server.resourceSvc.GetRevisions(ctx, resource.RevisionsSelector{URN: request.GetUrn()}) if err != nil { return nil, serverutils.ToRPCError(err) } diff --git a/internal/server/v1/resources/server_test.go b/internal/server/v1/resources/server_test.go index c9d4de55..92f7ef46 100644 --- a/internal/server/v1/resources/server_test.go +++ b/internal/server/v1/resources/server_test.go @@ -10,16 +10,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - entropyv1beta1 "go.buf.build/odpf/gwv/odpf/proton/odpf/entropy/v1beta1" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/structpb" "google.golang.org/protobuf/types/known/timestamppb" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/internal/server/v1/mocks" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/internal/server/v1/mocks" + "github.com/goto/entropy/pkg/errors" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" ) func TestAPIServer_CreateResource(t *testing.T) { @@ -43,7 +45,7 @@ func TestAPIServer_CreateResource(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - CreateResource(mock.Anything, mock.Anything). + CreateResource(mock.Anything, mock.Anything, core.WithDryRun(false)). Return(nil, errors.ErrConflict).Once() return NewAPIServer(resourceService) }, @@ -59,7 +61,7 @@ func TestAPIServer_CreateResource(t *testing.T) { }, }, want: nil, - wantErr: status.Error(codes.AlreadyExists, "an entity with conflicting identifier exists"), + wantErr: status.Error(codes.AlreadyExists, "conflict: an entity with conflicting identifier exists"), }, { name: "InvalidRequest", @@ -67,7 +69,7 @@ func TestAPIServer_CreateResource(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - CreateResource(mock.Anything, mock.Anything). + CreateResource(mock.Anything, mock.Anything, core.WithDryRun(false)). Return(nil, errors.ErrInvalid).Once() return NewAPIServer(resourceService) @@ -84,7 +86,7 @@ func TestAPIServer_CreateResource(t *testing.T) { }, }, want: nil, - wantErr: status.Errorf(codes.InvalidArgument, "request is not valid"), + wantErr: status.Errorf(codes.InvalidArgument, "bad_request: request is not valid"), }, { name: "Success", @@ -92,7 +94,7 @@ func TestAPIServer_CreateResource(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - CreateResource(mock.Anything, mock.Anything). + CreateResource(mock.Anything, mock.Anything, core.WithDryRun(false)). 
Return(&resource.Resource{ URN: "p-testdata-gl-testname-log", Kind: "log", @@ -148,7 +150,11 @@ func TestAPIServer_CreateResource(t *testing.T) { t.Parallel() srv := tt.setup(t) - got, err := srv.CreateResource(context.Background(), tt.request) + ctx := context.Background() + md := metadata.New(map[string]string{"user-id": "john.doe@goto.com"}) + ctx = metadata.NewIncomingContext(ctx, md) + + got, err := srv.CreateResource(ctx, tt.request) if tt.wantErr != nil { assert.Error(t, err) assert.Truef(t, errors.Is(err, tt.wantErr), "'%s' != '%s'", tt.wantErr, err) @@ -184,7 +190,7 @@ func TestAPIServer_UpdateResource(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - UpdateResource(mock.Anything, "p-testdata-gl-testname-log", mock.Anything). + UpdateResource(mock.Anything, "p-testdata-gl-testname-log", mock.Anything, core.WithDryRun(false)). Return(nil, errors.ErrNotFound).Once() return NewAPIServer(resourceService) }, @@ -195,7 +201,7 @@ func TestAPIServer_UpdateResource(t *testing.T) { }, }, want: nil, - wantErr: status.Error(codes.NotFound, "requested entity not found"), + wantErr: status.Error(codes.NotFound, "not_found: requested entity not found"), }, { name: "InvalidRequest", @@ -203,7 +209,7 @@ func TestAPIServer_UpdateResource(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - UpdateResource(mock.Anything, "p-testdata-gl-testname-log", mock.Anything). + UpdateResource(mock.Anything, "p-testdata-gl-testname-log", mock.Anything, core.WithDryRun(false)). Return(nil, errors.ErrInvalid).Once() return NewAPIServer(resourceService) }, @@ -214,7 +220,7 @@ func TestAPIServer_UpdateResource(t *testing.T) { }, }, want: nil, - wantErr: status.Errorf(codes.InvalidArgument, "request is not valid"), + wantErr: status.Errorf(codes.InvalidArgument, "bad_request: request is not valid"), }, { name: "Success", @@ -222,7 +228,7 @@ func TestAPIServer_UpdateResource(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - UpdateResource(mock.Anything, "p-testdata-gl-testname-log", mock.Anything). + UpdateResource(mock.Anything, "p-testdata-gl-testname-log", mock.Anything, core.WithDryRun(false)). Return(&resource.Resource{ URN: "p-testdata-gl-testname-log", Kind: "log", @@ -273,7 +279,11 @@ func TestAPIServer_UpdateResource(t *testing.T) { t.Parallel() srv := tt.setup(t) - got, err := srv.UpdateResource(context.Background(), tt.request) + ctx := context.Background() + md := metadata.New(map[string]string{"user-id": "john.doe@goto.com"}) + ctx = metadata.NewIncomingContext(ctx, md) + + got, err := srv.UpdateResource(ctx, tt.request) if tt.wantErr != nil { assert.Error(t, err) assert.True(t, errors.Is(err, tt.wantErr)) @@ -317,7 +327,7 @@ func TestAPIServer_GetResource(t *testing.T) { Urn: "p-testdata-gl-testname-log", }, want: nil, - wantErr: status.Error(codes.NotFound, "requested entity not found"), + wantErr: status.Error(codes.NotFound, "not_found: requested entity not found"), }, { name: "Success", @@ -409,8 +419,8 @@ func TestAPIServer_ListResources(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - ListResources(mock.Anything, mock.Anything). - Return(nil, errors.New("failed")).Once() + ListResources(mock.Anything, mock.Anything, false). 
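// A client-side sketch of how the acting user reaches these handlers: the tests in this
// file inject a "user-id" key as incoming gRPC metadata, so a caller would send the same
// key as outgoing metadata. The generated client constructor name and the example
// identity are assumptions, not part of this change.
package example

import (
	"context"

	entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

func createAsUser(conn *grpc.ClientConn, req *entropyv1beta1.CreateResourceRequest) (*entropyv1beta1.CreateResourceResponse, error) {
	client := entropyv1beta1.NewResourceServiceClient(conn)
	// The server resolves the identity from the incoming context
	// (see serverutils.GetUserIdentifier in the handler code above).
	ctx := metadata.AppendToOutgoingContext(context.Background(), "user-id", "jane.doe@example.com")
	return client.CreateResource(ctx, req)
}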
+ Return(resource.PagedResource{}, errors.New("failed")).Once() return NewAPIServer(resourceService) }, @@ -419,7 +429,7 @@ func TestAPIServer_ListResources(t *testing.T) { Kind: "log", }, want: nil, - wantErr: status.Error(codes.Internal, "some unexpected error occurred"), + wantErr: status.Error(codes.Internal, "internal_error: some unexpected error occurred: failed"), }, { name: "Success", @@ -427,21 +437,24 @@ func TestAPIServer_ListResources(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - ListResources(mock.Anything, mock.Anything). - Return([]resource.Resource{ - { - URN: "p-testdata-gl-testname-log", - Kind: "log", - Name: "testname", - Project: "p-testdata-gl", - Labels: nil, - CreatedAt: createdAt, - UpdatedAt: updatedAt, - Spec: resource.Spec{ - Configs: []byte(`{"replicas": "10"}`), - }, - State: resource.State{ - Status: resource.StatusPending, + ListResources(mock.Anything, mock.Anything, false). + Return(resource.PagedResource{ + Count: 1, + Resources: []resource.Resource{ + { + URN: "p-testdata-gl-testname-log", + Kind: "log", + Name: "testname", + Project: "p-testdata-gl", + Labels: nil, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + Spec: resource.Spec{ + Configs: []byte(`{"replicas": "10"}`), + }, + State: resource.State{ + Status: resource.StatusPending, + }, }, }, }, nil).Once() @@ -453,6 +466,7 @@ func TestAPIServer_ListResources(t *testing.T) { Kind: "log", }, want: &entropyv1beta1.ListResourcesResponse{ + Count: 1, Resources: []*entropyv1beta1.Resource{ { Urn: "p-testdata-gl-testname-log", @@ -518,7 +532,7 @@ func TestAPIServer_DeleteResource(t *testing.T) { Urn: "p-testdata-gl-testname-log", }, want: nil, - wantErr: status.Error(codes.NotFound, "requested entity not found"), + wantErr: status.Error(codes.NotFound, "not_found: requested entity not found"), }, { name: "Success", @@ -580,7 +594,7 @@ func TestAPIServer_ApplyAction(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - ApplyAction(mock.Anything, "p-testdata-gl-testname-log", mock.Anything). + ApplyAction(mock.Anything, "p-testdata-gl-testname-log", mock.Anything, core.WithDryRun(false)). Return(nil, errors.ErrNotFound).Once() return NewAPIServer(resourceService) }, @@ -589,7 +603,7 @@ func TestAPIServer_ApplyAction(t *testing.T) { Action: "scale", }, want: nil, - wantErr: status.Error(codes.NotFound, "requested entity not found"), + wantErr: status.Error(codes.NotFound, "not_found: requested entity not found"), }, { name: "Success", @@ -597,7 +611,7 @@ func TestAPIServer_ApplyAction(t *testing.T) { t.Helper() resourceService := &mocks.ResourceService{} resourceService.EXPECT(). - ApplyAction(mock.Anything, "p-testdata-gl-testname-log", mock.Anything). + ApplyAction(mock.Anything, "p-testdata-gl-testname-log", mock.Anything, core.WithDryRun(false)). 
Return(&resource.Resource{ URN: "p-testdata-gl-testname-log", Kind: "log", @@ -647,7 +661,11 @@ func TestAPIServer_ApplyAction(t *testing.T) { t.Parallel() srv := tt.setup(t) - got, err := srv.ApplyAction(context.Background(), tt.request) + ctx := context.Background() + md := metadata.New(map[string]string{"user-id": "john.doe@goto.com"}) + ctx = metadata.NewIncomingContext(ctx, md) + + got, err := srv.ApplyAction(ctx, tt.request) if tt.wantErr != nil { assert.Error(t, err) assert.True(t, errors.Is(err, tt.wantErr)) diff --git a/internal/store/postgres/module_model.go b/internal/store/postgres/module_model.go index 664e29ee..44460574 100644 --- a/internal/store/postgres/module_model.go +++ b/internal/store/postgres/module_model.go @@ -8,8 +8,8 @@ import ( sq "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/pkg/errors" ) const tableModules = "modules" diff --git a/internal/store/postgres/module_store.go b/internal/store/postgres/module_store.go index 32edbaba..84119150 100644 --- a/internal/store/postgres/module_store.go +++ b/internal/store/postgres/module_store.go @@ -6,8 +6,8 @@ import ( sq "github.com/Masterminds/squirrel" - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/pkg/errors" ) func (st *Store) GetModule(ctx context.Context, urn string) (*module.Module, error) { diff --git a/internal/store/postgres/postgres.go b/internal/store/postgres/postgres.go index 9b09170e..caf42c99 100644 --- a/internal/store/postgres/postgres.go +++ b/internal/store/postgres/postgres.go @@ -3,8 +3,11 @@ package postgres import ( "context" _ "embed" + "time" "github.com/jmoiron/sqlx" + + "github.com/goto/entropy/pkg/errors" ) const ( @@ -20,11 +23,20 @@ const ( // schema represents the storage schema. // Note: Update the constants above if the table name is changed. +// //go:embed schema.sql var schema string type Store struct { - db *sqlx.DB + db *sqlx.DB + extendInterval time.Duration + refreshInterval time.Duration + config Config +} + +type Config struct { + PaginationSizeDefault int32 + PaginationPageDefault int32 } func (st *Store) Migrate(ctx context.Context) error { @@ -34,11 +46,24 @@ func (st *Store) Migrate(ctx context.Context) error { func (st *Store) Close() error { return st.db.Close() } -// Open returns store instance backed by PostgreSQL. +// Open returns a store instance backed by PostgreSQL.
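// A minimal usage sketch for the new Open signature below, assuming a local connection
// string; refreshInterval must stay strictly below extendInterval, otherwise Open
// returns an error.
package example

import (
	"log"
	"time"

	"github.com/goto/entropy/internal/store/postgres"
)

func newStore() *postgres.Store {
	st, err := postgres.Open(
		"postgres://postgres:postgres@localhost:5432/entropy?sslmode=disable", // assumed DSN
		3*time.Second,  // refreshInterval: heartbeat period while a resource is being synced
		10*time.Second, // extendInterval: how far state_next_sync is pushed out on each heartbeat
		20,             // PaginationSizeDefault, applied when a list filter has no page size
		1,              // PaginationPageDefault, applied when a list filter has no page number
	)
	if err != nil {
		log.Fatal(err)
	}
	return st
}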
+func Open(conStr string, refreshInterval, extendInterval time.Duration, paginationSizeDefault, paginationPageDefault int32) (*Store, error) { db, err := sqlx.Open("postgres", conStr) if err != nil { return nil, err } - return &Store{db: db}, nil + + if refreshInterval >= extendInterval { + return nil, errors.New("refreshInterval must be lower than extendInterval") + } + + return &Store{ + db: db, + extendInterval: extendInterval, + refreshInterval: refreshInterval, + config: Config{ + PaginationSizeDefault: paginationSizeDefault, + PaginationPageDefault: paginationPageDefault, + }, + }, nil } diff --git a/internal/store/postgres/postgres_test.go b/internal/store/postgres/postgres_test.go new file mode 100644 index 00000000..31bb2d9d --- /dev/null +++ b/internal/store/postgres/postgres_test.go @@ -0,0 +1,54 @@ +package postgres_test + +import ( + "context" + "encoding/json" + "os" + "testing" + "time" + + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/internal/store/postgres" + "github.com/goto/salt/dockertestx" + "github.com/ory/dockertest/v3" +) + +func newTestClient(t *testing.T) (*postgres.Store, *dockertest.Pool, *dockertest.Resource) { + t.Helper() + + pgDocker, err := dockertestx.CreatePostgres(dockertestx.PostgresWithDockertestResourceExpiry(120)) + if err != nil { + t.Fatal(err) + } + + store, err := postgres.Open(pgDocker.GetExternalConnString(), 3*time.Second, 5*time.Second, 0, 1) + if err != nil { + t.Fatal(err) + } + + if err := store.Migrate(context.TODO()); err != nil { + t.Fatal(err) + } + + return store, pgDocker.GetPool(), pgDocker.GetResource() +} + +func bootstrapResources(ctx context.Context, store *postgres.Store) ([]resource.Resource, error) { + testFixtureJSON, err := os.ReadFile("./testdata/resources.json") + if err != nil { + return nil, err + } + + var data []resource.Resource + if err = json.Unmarshal(testFixtureJSON, &data); err != nil { + return nil, err + } + + for _, d := range data { + if err := store.Create(ctx, d); err != nil { + return nil, err + } + } + + return data, nil +} diff --git a/internal/store/postgres/resource_model.go b/internal/store/postgres/resource_model.go index e1c7facf..18876394 100644 --- a/internal/store/postgres/resource_model.go +++ b/internal/store/postgres/resource_model.go @@ -3,32 +3,174 @@ package postgres import ( "context" "database/sql" + "encoding/json" "time" sq "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" + "github.com/lib/pq" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" ) +const listResourceByFilterQuery = `SELECT r.id, r.urn, r.kind, r.name, r.project, r.created_at, r.updated_at, r.state_status, r.state_output, r.state_module_data, r.state_next_sync, r.state_sync_result, r.created_by, r.updated_by, + COALESCE(NULLIF(array_agg(rt.tag), '{NULL}'), '{}')::text[] AS tags, + jsonb_object_agg(COALESCE(rd.dependency_key, ''), d.urn) AS dependencies +FROM resources r + LEFT JOIN resource_dependencies rd ON r.id = rd.resource_id + LEFT JOIN resources d ON rd.depends_on = d.id + LEFT JOIN resource_tags rt ON r.id = rt.resource_id +WHERE ($1 = '' OR r.project = $1) + AND ($2 = '' OR r.kind = $2) +GROUP BY r.id +LIMIT $3 +OFFSET $4 +` + +const listResourceWithSpecConfigsByFilterQuery = `SELECT r.id, r.urn, r.kind, r.name, r.project, r.created_at, r.updated_at, r.spec_configs, r.state_status, r.state_output, r.state_module_data, 
r.state_next_sync, r.state_sync_result, r.created_by, r.updated_by, + COALESCE(NULLIF(array_agg(rt.tag), '{NULL}'), '{}')::text[] AS tags, + jsonb_object_agg(COALESCE(rd.dependency_key, ''), d.urn) AS dependencies +FROM resources r + LEFT JOIN resource_dependencies rd ON r.id = rd.resource_id + LEFT JOIN resources d ON rd.depends_on = d.id + LEFT JOIN resource_tags rt ON r.id = rt.resource_id +WHERE ($1 = '' OR r.project = $1) + AND ($2 = '' OR r.kind = $2) +GROUP BY r.id +LIMIT $3 +OFFSET $4 +` + type resourceModel struct { - ID int64 `db:"id"` - URN string `db:"urn"` - Kind string `db:"kind"` - Name string `db:"name"` - Project string `db:"project"` - CreatedAt time.Time `db:"created_at"` - UpdatedAt time.Time `db:"updated_at"` - SpecConfigs []byte `db:"spec_configs"` - StateStatus string `db:"state_status"` - StateOutput []byte `db:"state_output"` - StateModuleData []byte `db:"state_module_data"` + ID int64 `db:"id"` + URN string `db:"urn"` + Kind string `db:"kind"` + Name string `db:"name"` + Project string `db:"project"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` + CreatedBy string `db:"created_by"` + UpdatedBy string `db:"updated_by"` + SpecConfigs []byte `db:"spec_configs"` + StateStatus string `db:"state_status"` + StateOutput []byte `db:"state_output"` + StateModuleData []byte `db:"state_module_data"` + StateNextSync *time.Time `db:"state_next_sync"` + StateSyncResult json.RawMessage `db:"state_sync_result"` +} + +type ListResourceByFilterRow struct { + ID int64 + Urn string + Kind string + Name string + Project string + CreatedAt *time.Time + UpdatedAt *time.Time + SpecConfigs []byte + StateStatus string + StateOutput []byte + StateModuleData []byte + StateNextSync *time.Time + StateSyncResult []byte + CreatedBy string + UpdatedBy string + Tags pq.StringArray + Dependencies []byte +} + +func listResourceWithSpecConfigsByFilter(ctx context.Context, db *sqlx.DB, project, kind string, limit int32, offset int32) ([]ListResourceByFilterRow, error) { + // Set limit default to nil + var limitPointers *int32 + if limit != 0 { + limitPointers = &limit + } + rows, err := db.QueryContext(ctx, listResourceWithSpecConfigsByFilterQuery, project, kind, limitPointers, offset) + if err != nil { + return nil, err + } + + defer rows.Close() + var items []ListResourceByFilterRow + for rows.Next() { + var i ListResourceByFilterRow + if err := rows.Scan( + &i.ID, + &i.Urn, + &i.Kind, + &i.Name, + &i.Project, + &i.CreatedAt, + &i.UpdatedAt, + &i.SpecConfigs, + &i.StateStatus, + &i.StateOutput, + &i.StateModuleData, + &i.StateNextSync, + &i.StateSyncResult, + &i.CreatedBy, + &i.UpdatedBy, + &i.Tags, + &i.Dependencies, + ); err != nil { + return nil, err + } + items = append(items, i) + } + + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +func listResourceByFilter(ctx context.Context, db *sqlx.DB, project, kind string, limit int32, offset int32) ([]ListResourceByFilterRow, error) { + // Set limit default to nil + var limitPointers *int32 + if limit != 0 { + limitPointers = &limit + } + rows, err := db.QueryContext(ctx, listResourceByFilterQuery, project, kind, limitPointers, offset) + if err != nil { + return nil, err + } + defer rows.Close() + var items []ListResourceByFilterRow + for rows.Next() { + var i ListResourceByFilterRow + if err := rows.Scan( + &i.ID, + &i.Urn, + &i.Kind, + &i.Name, + &i.Project, + &i.CreatedAt, + &i.UpdatedAt, + &i.StateStatus, + &i.StateOutput, + &i.StateModuleData, + &i.StateNextSync, + 
&i.StateSyncResult, + &i.CreatedBy, + &i.UpdatedBy, + &i.Tags, + &i.Dependencies, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil } func readResourceRecord(ctx context.Context, r sqlx.QueryerContext, urn string, into *resourceModel) error { cols := []string{ - "id", "urn", "kind", "project", "name", "created_at", "updated_at", + "id", "urn", "kind", "project", "name", "created_at", "updated_at", "created_by", "updated_by", "spec_configs", "state_status", "state_output", "state_module_data", + "state_next_sync", "state_sync_result", } builder := sq.Select(cols...).From(tableResources).Where(sq.Eq{"urn": urn}) diff --git a/internal/store/postgres/resource_store.go b/internal/store/postgres/resource_store.go index 04bb6e2f..c68c76c2 100644 --- a/internal/store/postgres/resource_store.go +++ b/internal/store/postgres/resource_store.go @@ -3,12 +3,14 @@ package postgres import ( "context" "database/sql" + "encoding/json" + "time" sq "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" ) func (st *Store) GetByURN(ctx context.Context, urn string) (*resource.Resource, error) { @@ -36,6 +38,15 @@ func (st *Store) GetByURN(ctx context.Context, urn string) (*resource.Resource, return nil, txErr } + var syncResult resource.SyncResult + if len(rec.StateSyncResult) > 0 { + if err := json.Unmarshal(rec.StateSyncResult, &syncResult); err != nil { + return nil, errors.ErrInternal. + WithMsgf("failed to json unmarshal state_sync_result"). + WithCausef(err.Error()) + } + } + return &resource.Resource{ URN: rec.URN, Kind: rec.Kind, @@ -44,6 +55,8 @@ func (st *Store) GetByURN(ctx context.Context, urn string) (*resource.Resource, Labels: tagsToLabelMap(tags), CreatedAt: rec.CreatedAt, UpdatedAt: rec.UpdatedAt, + CreatedBy: rec.CreatedBy, + UpdatedBy: rec.UpdatedBy, Spec: resource.Spec{ Configs: rec.SpecConfigs, Dependencies: deps, @@ -52,51 +65,78 @@ func (st *Store) GetByURN(ctx context.Context, urn string) (*resource.Resource, Status: rec.StateStatus, Output: rec.StateOutput, ModuleData: rec.StateModuleData, + NextSyncAt: rec.StateNextSync, + SyncResult: syncResult, }, }, nil } -func (st *Store) List(ctx context.Context, filter resource.Filter) ([]resource.Resource, error) { - q := sq.Select("urn").From(tableResources) - if filter.Kind != "" { - q = q.Where(sq.Eq{"kind": filter.Kind}) +func (st *Store) List(ctx context.Context, filter resource.Filter, withSpecConfigs bool) ([]resource.Resource, error) { + var resourceList []ListResourceByFilterRow + + if filter.PageSize < 1 { + filter.PageSize = st.config.PaginationSizeDefault } - if filter.Project != "" { - q = q.Where(sq.Eq{"project": filter.Project}) + if filter.PageNum < 1 { + filter.PageNum = st.config.PaginationPageDefault } - if len(filter.Labels) > 0 { - tags := labelMapToTags(filter.Labels) - q = q.Join("resource_tags ON resource_id=id"). - Where(sq.Eq{"tag": tags}). - GroupBy("urn"). 
- Having("count(*) >= ?", len(tags)) - } + offset := (filter.PageNum - 1) * filter.PageSize - rows, err := q.PlaceholderFormat(sq.Dollar).RunWith(st.db).QueryContext(ctx) + var err error + if withSpecConfigs { + resourceList, err = listResourceWithSpecConfigsByFilter(ctx, st.db, filter.Project, filter.Kind, filter.PageSize, offset) + } else { + resourceList, err = listResourceByFilter(ctx, st.db, filter.Project, filter.Kind, filter.PageSize, offset) + } if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, nil - } return nil, err } - defer rows.Close() - var res []resource.Resource - for rows.Next() { - var urn string - if err := rows.Scan(&urn); err != nil { - return nil, err + var result []resource.Resource + for _, res := range resourceList { + var nextSyncAt *time.Time + if res.StateNextSync != nil && !res.StateNextSync.IsZero() { + nextSyncAt = res.StateNextSync + } + + var syncResult resource.SyncResult + if len(res.StateSyncResult) > 0 { + if err := json.Unmarshal(res.StateSyncResult, &syncResult); err != nil { + return nil, err + } } - r, err := st.GetByURN(ctx, urn) + deps, err := depsBytesToMap(res.Dependencies) if err != nil { return nil, err } - res = append(res, *r) + + result = append(result, resource.Resource{ + URN: res.Urn, + Kind: res.Kind, + Name: res.Name, + Project: res.Project, + Labels: tagsToLabelMap(res.Tags), + CreatedAt: *res.CreatedAt, + UpdatedAt: *res.UpdatedAt, + UpdatedBy: res.UpdatedBy, + CreatedBy: res.CreatedBy, + Spec: resource.Spec{ + Configs: res.SpecConfigs, + Dependencies: deps, + }, + State: resource.State{ + Status: res.StateStatus, + Output: res.StateOutput, + ModuleData: res.StateModuleData, + NextSyncAt: nextSyncAt, + SyncResult: syncResult, + }, + }) } - return res, rows.Err() + return result, nil } func (st *Store) Create(ctx context.Context, r resource.Resource, hooks ...resource.MutationHook) error { @@ -115,13 +155,14 @@ func (st *Store) Create(ctx context.Context, r resource.Resource, hooks ...resou } rev := resource.Revision{ - URN: r.URN, - Spec: r.Spec, - Labels: r.Labels, - Reason: "resource created", + URN: r.URN, + Spec: r.Spec, + Labels: r.Labels, + Reason: "action:create", + CreatedBy: r.UpdatedBy, } - if err := insertRevision(ctx, tx, rev); err != nil { + if err := insertRevision(ctx, tx, id, rev); err != nil { return translateErr(err) } @@ -146,10 +187,13 @@ func (st *Store) Update(ctx context.Context, r resource.Resource, saveRevision b Where(sq.Eq{"id": id}). SetMap(map[string]interface{}{ "updated_at": sq.Expr("current_timestamp"), + "updated_by": r.UpdatedBy, "spec_configs": r.Spec.Configs, "state_status": r.State.Status, "state_output": r.State.Output, "state_module_data": r.State.ModuleData, + "state_next_sync": r.State.NextSyncAt, + "state_sync_result": syncResultAsJSON(r.State.SyncResult), }). 
PlaceholderFormat(sq.Dollar) @@ -167,13 +211,14 @@ if saveRevision { rev := resource.Revision{ - URN: r.URN, - Spec: r.Spec, - Labels: r.Labels, - Reason: reason, + URN: r.URN, + Spec: r.Spec, + Labels: r.Labels, + Reason: reason, + CreatedBy: r.UpdatedBy, } - if err := insertRevision(ctx, tx, rev); err != nil { + if err := insertRevision(ctx, tx, id, rev); err != nil { return translateErr(err) } } @@ -219,17 +264,117 @@ func (st *Store) Delete(ctx context.Context, urn string, hooks ...resource.Mutat return withinTx(ctx, st.db, false, deleteFn) } -func insertResourceRecord(ctx context.Context, runner sq.BaseRunner, r resource.Resource) (int64, error) { - q := sq.Insert(tableResources). - Columns("urn", "kind", "project", "name", "created_at", "updated_at", - "spec_configs", "state_status", "state_output", "state_module_data"). - Values(r.URN, r.Kind, r.Project, r.Name, r.CreatedAt, r.UpdatedAt, - r.Spec.Configs, r.State.Status, r.State.Output, r.State.ModuleData). - Suffix(`RETURNING "id"`). - PlaceholderFormat(sq.Dollar) +func (st *Store) SyncOne(ctx context.Context, scope map[string][]string, syncFn resource.SyncFn) error { + urn, err := st.fetchResourceForSync(ctx, scope) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // No resource available for sync. + return nil + } + return err + } + + cur, err := st.GetByURN(ctx, urn) + if err != nil { + return err + } + + synced, err := st.handleDequeued(ctx, *cur, syncFn) + if err != nil { + return err + } + + return st.Update(ctx, *synced, false, "sync") +} + +func (st *Store) handleDequeued(baseCtx context.Context, res resource.Resource, fn resource.SyncFn) (*resource.Resource, error) { + runCtx, cancel := context.WithCancel(baseCtx) + defer cancel() + + // Run heartbeat to keep the resource from being picked up by some other + // syncer thread. If heartbeat exits, runCtx will be cancelled and fn should exit. + go st.runHeartbeat(runCtx, cancel, res.URN) + + return fn(runCtx, res) +} + +func (st *Store) fetchResourceForSync(ctx context.Context, scope map[string][]string) (string, error) { + var urn string + + // Find a resource ready for sync and extend its next sync time atomically. + // This ensures multiple workers do not pick up the same resource for sync. + err := withinTx(ctx, st.db, false, func(ctx context.Context, tx *sqlx.Tx) error { + builder := sq. + Select("urn"). + From(tableResources). + Where(sq.Expr("state_next_sync <= current_timestamp")). + Suffix("FOR UPDATE SKIP LOCKED") + + for key, value := range scope { + builder = builder.Where(sq.Eq{key: value}) + } + + query, args, err := builder.PlaceholderFormat(sq.Dollar).ToSql() + if err != nil { + return err + } + + if err := st.db.QueryRowxContext(ctx, query, args...).Scan(&urn); err != nil { + return err + } + + return st.extendWaitTime(ctx, tx, urn) + }) + + return urn, err +} + +func (st *Store) runHeartbeat(ctx context.Context, cancel context.CancelFunc, id string) { + defer cancel() + + tick := time.NewTicker(st.refreshInterval) + defer tick.Stop() + + for { + select { + case <-ctx.Done(): + return + + case <-tick.C: + if err := st.extendWaitTime(ctx, st.db, id); err != nil { + return + } + } + } +} + +func (st *Store) extendWaitTime(ctx context.Context, r sq.BaseRunner, urn string) error { + extendTo := sq.Expr("current_timestamp + (? ||' seconds')::interval ", st.extendInterval.Seconds()) + extendQuery := sq.Update(tableResources). + Set("state_next_sync", extendTo).
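// A worker-loop sketch for SyncOne, assuming the caller owns the polling cadence; the
// scope keys map directly to columns on the resources table (e.g. "project", "kind"),
// as exercised by the store tests below.
package example

import (
	"context"
	"log"
	"time"

	"github.com/goto/entropy/core/resource"
	"github.com/goto/entropy/internal/store/postgres"
)

func runSyncer(ctx context.Context, st *postgres.Store, syncFn resource.SyncFn) {
	tick := time.NewTicker(time.Second) // assumed polling interval
	defer tick.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
			// Each call claims at most one due resource (FOR UPDATE SKIP LOCKED) and
			// keeps extending its state_next_sync while syncFn runs; it returns nil
			// when nothing is due.
			if err := st.SyncOne(ctx, map[string][]string{"kind": {"firehose"}}, syncFn); err != nil {
				log.Printf("sync failed: %v", err)
			}
		}
	}
}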
+ Where(sq.Eq{"urn": urn}) + + _, err := extendQuery.PlaceholderFormat(sq.Dollar).RunWith(r).ExecContext(ctx) + return err +} + +func insertResourceRecord(ctx context.Context, runner sqlx.QueryerContext, r resource.Resource) (int64, error) { + builder := sq.Insert(tableResources). + Columns("urn", "kind", "project", "name", "created_at", "updated_at", "created_by", "updated_by", + "spec_configs", "state_status", "state_output", "state_module_data", + "state_next_sync", "state_sync_result"). + Values(r.URN, r.Kind, r.Project, r.Name, r.CreatedAt, r.UpdatedAt, r.CreatedBy, r.UpdatedBy, + r.Spec.Configs, r.State.Status, r.State.Output, r.State.ModuleData, + r.State.NextSyncAt, syncResultAsJSON(r.State.SyncResult)). + Suffix(`RETURNING "id"`) + + q, args, err := builder.PlaceholderFormat(sq.Dollar).ToSql() + if err != nil { + return 0, err + } var id int64 - if err := q.RunWith(runner).QueryRowContext(ctx).Scan(&id); err != nil { + if err := runner.QueryRowxContext(ctx, q, args...).Scan(&id); err != nil { return 0, err } return id, nil @@ -265,3 +410,32 @@ func setDependencies(ctx context.Context, runner sq.BaseRunner, id int64, deps m return nil } + +func syncResultAsJSON(syncRes resource.SyncResult) json.RawMessage { + if syncRes == (resource.SyncResult{}) { + return nil + } + val, err := json.Marshal(syncRes) + if err != nil { + panic(err) + } + return val +} + +func depsBytesToMap(dependencies []byte) (map[string]string, error) { + deps := map[string]string{} + if len(dependencies) > 0 { + if err := json.Unmarshal(dependencies, &deps); err != nil { + return nil, err + } + + for k := range deps { + if k != "" { + break + } + deps = map[string]string{} + } + } + + return deps, nil +} diff --git a/internal/store/postgres/resource_store_test.go b/internal/store/postgres/resource_store_test.go new file mode 100644 index 00000000..5bad1ce5 --- /dev/null +++ b/internal/store/postgres/resource_store_test.go @@ -0,0 +1,119 @@ +package postgres_test + +import ( + "context" + "testing" + + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/internal/store/postgres" + "github.com/goto/entropy/pkg/errors" + "github.com/ory/dockertest/v3" + "github.com/stretchr/testify/suite" +) + +type ResourceStoreTestSuite struct { + suite.Suite + ctx context.Context + pool *dockertest.Pool + resource *dockertest.Resource + store *postgres.Store + resources []resource.Resource +} + +func (s *ResourceStoreTestSuite) SetupTest() { + s.store, s.pool, s.resource = newTestClient(s.T()) + s.ctx = context.Background() + + var err error + s.resources, err = bootstrapResources(s.ctx, s.store) + if err != nil { + s.T().Fatal(err) + } + + s.Assert().Equal(len(s.resources), 6) +} + +func (s *ResourceStoreTestSuite) TestSyncOne() { + type testCase struct { + Description string + Scope map[string][]string + syncFn resource.SyncFn + ErrString string + } + + projectScope := []string{"test-project-00"} + kindScope := []string{"dagger"} + unknownScope := []string{"unknown"} + + testCases := []testCase{ + { + Description: "if scope is empty, it will take any job", + Scope: map[string][]string{}, + syncFn: func(ctx context.Context, res resource.Resource) (*resource.Resource, error) { + if res.URN == "" { + return nil, errors.New("no empty resource") + } + return &res, nil + }, + }, + { + Description: "take job with project test-project-00", + Scope: map[string][]string{ + "project": projectScope, + }, + syncFn: func(ctx context.Context, res 
resource.Resource) (*resource.Resource, error) { + if res.Project != "test-project-00" { + return nil, errors.New("wrong resource project") + } + return &res, nil + }, + }, + { + Description: "take job with kind dagger", + Scope: map[string][]string{ + "kind": kindScope, + }, + syncFn: func(ctx context.Context, res resource.Resource) (*resource.Resource, error) { + if res.Kind != "dagger" { + return nil, errors.New("wrong resource kind") + } + return &res, nil + }, + }, + { + Description: "throw error for unknown field", + Scope: map[string][]string{ + "unknown": unknownScope, + }, + syncFn: func(ctx context.Context, res resource.Resource) (*resource.Resource, error) { + return &res, nil + }, + ErrString: "pq: column \"unknown\" does not exist", + }, + } + + for _, tc := range testCases { + s.Run(tc.Description, func() { + err := s.store.SyncOne(s.ctx, tc.Scope, tc.syncFn) + if tc.ErrString != "" { + if err.Error() != tc.ErrString { + s.T().Fatalf("got error %s, expected was %s", err.Error(), tc.ErrString) + } + } else { + if err != nil { + s.T().Fatalf("got error %s, expected was none", err.Error()) + } + } + }) + } +} + +func (s *ResourceStoreTestSuite) TearDownTest() { + if err := s.pool.Purge(s.resource); err != nil { + s.T().Fatal(err) + } +} + +func TestResourceStore(t *testing.T) { + suite.Run(t, new(ResourceStoreTestSuite)) +} diff --git a/internal/store/postgres/revision_model.go b/internal/store/postgres/revision_model.go index 785727ae..03268bc5 100644 --- a/internal/store/postgres/revision_model.go +++ b/internal/store/postgres/revision_model.go @@ -2,41 +2,20 @@ package postgres import ( "context" - "database/sql" "time" sq "github.com/Masterminds/squirrel" - "github.com/jmoiron/sqlx" - - "github.com/odpf/entropy/pkg/errors" ) type revisionModel struct { ID int64 `db:"id"` - URN string `db:"urn"` Reason string `db:"reason"` CreatedAt time.Time `db:"created_at"` + CreatedBy string `db:"created_by"` + ResourceID int64 `db:"resource_id"` SpecConfigs []byte `db:"spec_configs"` } -func readRevisionRecord(ctx context.Context, r sqlx.QueryerContext, id int64, into *revisionModel) error { - cols := []string{"id", "urn", "reason", "created_at", "spec_configs"} - builder := sq.Select(cols...).From(tableRevisions).Where(sq.Eq{"id": id}) - - query, args, err := builder.PlaceholderFormat(sq.Dollar).ToSql() - if err != nil { - return err - } - - if err := r.QueryRowxContext(ctx, query, args...).StructScan(into); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return errors.ErrNotFound - } - return err - } - return nil -} - func readRevisionTags(ctx context.Context, r sq.BaseRunner, revisionID int64, into *[]string) error { return readTags(ctx, r, tableRevisionTags, columnRevisionID, revisionID, into) } diff --git a/internal/store/postgres/revision_store.go b/internal/store/postgres/revision_store.go index 7b08ba1e..bb1f989d 100644 --- a/internal/store/postgres/revision_store.go +++ b/internal/store/postgres/revision_store.go @@ -7,108 +7,92 @@ import ( sq "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" ) func (st *Store) Revisions(ctx context.Context, selector resource.RevisionsSelector) ([]resource.Revision, error) { - q := sq.Select("id"). - From(tableRevisions). 
- Where(sq.Eq{"urn": selector.URN}) - - rows, err := q.PlaceholderFormat(sq.Dollar).RunWith(st.db).QueryContext(ctx) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, nil - } - return nil, err - } - defer rows.Close() - var revs []resource.Revision - for rows.Next() { - var id int64 - if err := rows.Scan(&id); err != nil { - return nil, err - } - - r, err := st.getRevisionByID(ctx, id) + txFn := func(ctx context.Context, tx *sqlx.Tx) error { + resourceID, err := translateURNToID(ctx, tx, selector.URN) if err != nil { - return nil, err - } - revs = append(revs, *r) - } - - return revs, rows.Err() -} - -func (st *Store) getRevisionByID(ctx context.Context, id int64) (*resource.Revision, error) { - var rec revisionModel - var tags []string - deps := map[string]string{} - - readRevisionParts := func(ctx context.Context, tx *sqlx.Tx) error { - if err := readRevisionRecord(ctx, tx, id, &rec); err != nil { return err } - if err := readRevisionTags(ctx, tx, rec.ID, &tags); err != nil { + deps := map[string]string{} + if err := readResourceDeps(ctx, tx, resourceID, deps); err != nil { return err } - resourceID, err := translateURNToID(ctx, tx, rec.URN) + builder := sq.Select("*"). + From(tableRevisions). + Where(sq.Eq{"resource_id": resourceID}). + OrderBy("created_at DESC") + + q, args, err := builder.PlaceholderFormat(sq.Dollar).ToSql() if err != nil { return err } - if err := readResourceDeps(ctx, tx, resourceID, deps); err != nil { + rows, err := tx.QueryxContext(ctx, q, args...) + if err != nil { return err } + defer func() { _ = rows.Close() }() + + for rows.Next() { + var rm revisionModel + if err := rows.StructScan(&rm); err != nil { + return err + } + + revs = append(revs, resource.Revision{ + ID: rm.ID, + URN: selector.URN, + Reason: rm.Reason, + CreatedAt: rm.CreatedAt, + CreatedBy: rm.CreatedBy, + Spec: resource.Spec{ + Configs: rm.SpecConfigs, + Dependencies: deps, + }, + }) + } + _ = rows.Close() + + for i, rev := range revs { + var tags []string + if err := readRevisionTags(ctx, tx, rev.ID, &tags); err != nil { + return err + } + revs[i].Labels = tagsToLabelMap(tags) + } return nil } - if txErr := withinTx(ctx, st.db, true, readRevisionParts); txErr != nil { - return nil, txErr - } - - return &resource.Revision{ - ID: rec.ID, - URN: rec.URN, - Reason: rec.Reason, - Labels: tagsToLabelMap(tags), - CreatedAt: rec.CreatedAt, - Spec: resource.Spec{ - Configs: rec.SpecConfigs, - Dependencies: deps, - }, - }, nil -} - -func insertRevision(ctx context.Context, tx *sqlx.Tx, rev resource.Revision) error { - revisionID, err := insertRevisionRecord(ctx, tx, rev) - if err != nil { - return err - } - - if err := setRevisionTags(ctx, tx, revisionID, rev.Labels); err != nil { - return err + if err := withinTx(ctx, st.db, true, txFn); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return nil, err } - return nil + return revs, nil } -func insertRevisionRecord(ctx context.Context, runner sq.BaseRunner, r resource.Revision) (int64, error) { +func insertRevision(ctx context.Context, tx *sqlx.Tx, resID int64, rev resource.Revision) error { q := sq.Insert(tableRevisions). - Columns("urn", "reason", "spec_configs"). - Values(r.URN, r.Reason, r.Spec.Configs). + Columns("resource_id", "reason", "spec_configs", "created_by"). + Values(resID, rev.Reason, rev.Spec.Configs, rev.CreatedBy). Suffix(`RETURNING "id"`). 
PlaceholderFormat(sq.Dollar) - var id int64 - if err := q.RunWith(runner).QueryRowContext(ctx).Scan(&id); err != nil { - return 0, err + var revisionID int64 + if err := q.RunWith(tx).QueryRowContext(ctx).Scan(&revisionID); err != nil { + return err } - return id, nil + + return setRevisionTags(ctx, tx, revisionID, rev.Labels) } func setRevisionTags(ctx context.Context, runner sq.BaseRunner, id int64, labels map[string]string) error { diff --git a/internal/store/postgres/schema.sql b/internal/store/postgres/schema.sql index 6f7c6f06..baa5ba7d 100644 --- a/internal/store/postgres/schema.sql +++ b/internal/store/postgres/schema.sql @@ -1,35 +1,49 @@ -CREATE TABLE IF NOT EXISTS resources +CREATE TABLE IF NOT EXISTS modules ( - id BIGSERIAL NOT NULL PRIMARY KEY, - urn TEXT NOT NULL UNIQUE, - kind TEXT NOT NULL, - name TEXT NOT NULL, - project TEXT NOT NULL, - created_at timestamp NOT NULL DEFAULT current_timestamp, - updated_at timestamp NOT NULL DEFAULT current_timestamp, - spec_configs bytea NOT NULL, - state_status TEXT NOT NULL, - state_output bytea NOT NULL, - state_module_data bytea NOT NULL + urn TEXT NOT NULL PRIMARY KEY, + name TEXT NOT NULL, + project TEXT NOT NULL, + configs bytea NOT NULL, + created_at timestamptz NOT NULL DEFAULT current_timestamp, + updated_at timestamptz NOT NULL DEFAULT current_timestamp ); +CREATE INDEX IF NOT EXISTS idx_modules_project ON modules (project); +CREATE TABLE IF NOT EXISTS resources +( + id BIGSERIAL NOT NULL PRIMARY KEY, + urn TEXT NOT NULL UNIQUE, + kind TEXT NOT NULL, + name TEXT NOT NULL, + project TEXT NOT NULL, + created_at timestamptz NOT NULL DEFAULT current_timestamp, + updated_at timestamptz NOT NULL DEFAULT current_timestamp, + spec_configs bytea NOT NULL, + state_status TEXT NOT NULL, + state_output bytea NOT NULL, + state_module_data bytea NOT NULL, + state_next_sync timestamptz, + state_sync_result bytea +); CREATE INDEX IF NOT EXISTS idx_resources_kind ON resources (kind); -CREATE INDEX IF NOT EXISTS idx_resources_name ON resources (name); CREATE INDEX IF NOT EXISTS idx_resources_project ON resources (project); CREATE INDEX IF NOT EXISTS idx_resources_state_status ON resources (state_status); +CREATE INDEX IF NOT EXISTS idx_resources_next_sync ON resources (state_next_sync); CREATE TABLE IF NOT EXISTS resource_dependencies ( resource_id BIGINT NOT NULL REFERENCES resources (id), dependency_key TEXT NOT NULL, depends_on BIGINT NOT NULL REFERENCES resources (id), + UNIQUE (resource_id, dependency_key) ); CREATE TABLE IF NOT EXISTS resource_tags ( - resource_id BIGINT NOT NULL REFERENCES resources (id), tag TEXT NOT NULL, + resource_id BIGINT NOT NULL REFERENCES resources (id), + UNIQUE (resource_id, tag) ); CREATE INDEX IF NOT EXISTS idx_resource_tags_resource_id ON resource_tags (resource_id); @@ -37,33 +51,32 @@ CREATE INDEX IF NOT EXISTS idx_resource_tags_tag ON resource_tags (tag); CREATE TABLE IF NOT EXISTS revisions ( - id BIGSERIAL NOT NULL PRIMARY KEY, - urn TEXT NOT NULL, - spec_configs bytea NOT NULL, - created_at timestamp NOT NULL DEFAULT current_timestamp + id BIGSERIAL NOT NULL PRIMARY KEY, + reason TEXT NOT NULL DEFAULT '', + created_at timestamptz NOT NULL DEFAULT current_timestamp, + resource_id BIGINT NOT NULL REFERENCES resources (id), + spec_configs bytea NOT NULL ); - -CREATE INDEX IF NOT EXISTS idx_revisions_urn ON revisions (urn); +CREATE INDEX IF NOT EXISTS idx_revisions_resource_id ON revisions (resource_id); CREATE INDEX IF NOT EXISTS idx_revisions_created_at ON revisions (created_at); CREATE TABLE IF NOT 
EXISTS revision_tags ( - revision_id BIGINT NOT NULL REFERENCES revisions (id), tag TEXT NOT NULL, + revision_id BIGINT NOT NULL REFERENCES revisions (id), + UNIQUE (revision_id, tag) ); CREATE INDEX IF NOT EXISTS idx_revision_tags_revision_id ON revision_tags (revision_id); CREATE INDEX IF NOT EXISTS idx_revision_tags_tag ON revision_tags (tag); --- -CREATE TABLE IF NOT EXISTS modules ( - urn TEXT NOT NULL PRIMARY KEY, - name TEXT NOT NULL, - project TEXT NOT NULL, - configs jsonb NOT NULL, - created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, - updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp -); +ALTER TABLE resources + ADD COLUMN IF NOT EXISTS created_by TEXT NOT NULL DEFAULT '', + ADD COLUMN IF NOT EXISTS updated_by TEXT NOT NULL DEFAULT ''; -CREATE INDEX IF NOT EXISTS idx_modules_project ON modules (project); -ALTER TABLE revisions ADD COLUMN IF NOT EXISTS reason TEXT DEFAULT '' NOT NULL; +ALTER TABLE revisions ADD COLUMN IF NOT EXISTS created_by TEXT NOT NULL DEFAULT ''; + +ALTER TABLE revision_tags +DROP CONSTRAINT revision_tags_revision_id_fkey, + ADD CONSTRAINT revision_tags_revision_id_fkey FOREIGN KEY (revision_id) + REFERENCES revisions (id) ON DELETE CASCADE; \ No newline at end of file diff --git a/internal/store/postgres/testdata/resources.json b/internal/store/postgres/testdata/resources.json new file mode 100644 index 00000000..a383d1f2 --- /dev/null +++ b/internal/store/postgres/testdata/resources.json @@ -0,0 +1,74 @@ +[ + { + "urn": "orn:entropy:firehose:test-project-00:test-firehose", + "kind": "firehose", + "name": "test-firehose", + "project": "test-project-00", + "labels": { + "description": "test firehose resource" + }, + "state": { + "next_sync_at": "0001-01-01T00:00:00Z" + } + }, + { + "urn": "orn:entropy:dagger:test-project-01:test-dagger", + "kind": "dagger", + "name": "test-dagger", + "project": "test-project-01", + "labels": { + "description": "test dagger resource" + }, + "state": { + "next_sync_at": "0001-01-01T00:00:00Z" + } + }, + { + "urn": "orn:entropy:firehose:test-project-01:test-firehose-01", + "kind": "firehose", + "name": "test-firehose", + "project": "test-project-01", + "labels": { + "description": "test firehose resource" + }, + "state": { + "next_sync_at": "0001-01-01T00:00:00Z" + } + }, + { + "urn": "orn:entropy:dagger:test-project-00:test-dagger-01", + "kind": "dagger", + "name": "test-dagger", + "project": "test-project-00", + "labels": { + "description": "test dagger resource" + }, + "state": { + "next_sync_at": "0001-01-01T00:00:00Z" + } + }, + { + "urn": "orn:entropy:firehose:test-project-00:test-firehose-02", + "kind": "firehose", + "name": "test-firehose", + "project": "test-project-00", + "labels": { + "description": "test firehose resource" + }, + "state": { + "next_sync_at": "0001-01-01T00:00:00Z" + } + }, + { + "urn": "orn:entropy:dagger:test-project-01:test-dagger-02", + "kind": "dagger", + "name": "test-dagger", + "project": "test-project-01", + "labels": { + "description": "test dagger resource" + }, + "state": { + "next_sync_at": "0001-01-01T00:00:00Z" + } + } +] \ No newline at end of file diff --git a/internal/store/postgres/utils.go b/internal/store/postgres/utils.go index c6c33c9a..caa003af 100644 --- a/internal/store/postgres/utils.go +++ b/internal/store/postgres/utils.go @@ -7,8 +7,8 @@ import ( "github.com/jmoiron/sqlx" "github.com/lib/pq" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" + 
"github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" ) type TxFunc func(ctx context.Context, tx *sqlx.Tx) error diff --git a/main.go b/main.go index 0861d5cf..d2565722 100644 --- a/main.go +++ b/main.go @@ -5,7 +5,7 @@ import ( "os/signal" "syscall" - "github.com/odpf/entropy/cli" + "github.com/goto/entropy/cli" ) func main() { diff --git a/modules/dagger/config.go b/modules/dagger/config.go new file mode 100644 index 00000000..c3e31f76 --- /dev/null +++ b/modules/dagger/config.go @@ -0,0 +1,469 @@ +package dagger + +import ( + _ "embed" + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/flink" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/validator" +) + +const ( + helmReleaseNameMaxLength = 45 +) + +// Stream-related constants +const ( + keyStreams = "STREAMS" + keySinkType = "SINK_TYPE" +) + +// Flink-related constants +const ( + keyFlinkJobID = "FLINK_JOB_ID" + keyFlinkParallelism = "FLINK_PARALLELISM" +) + +// Influx-related constants +const ( + keySinkInfluxURL = "SINK_INFLUX_URL" + keySinkInfluxPassword = "SINK_INFLUX_PASSWORD" + keySinkInfluxDBName = "SINK_INFLUX_DB_NAME" + keySinkInfluxUsername = "SINK_INFLUX_USERNAME" + keySinkInfluxMeasurementName = "SINK_INFLUX_MEASUREMENT_NAME" + keySinkInfluxRetentionPolicy = "SINK_INFLUX_RETENTION_POLICY" + keySinkInfluxFlushDurationMs = "SINK_INFLUX_FLUSH_DURATION_MS" + keySinkInfluxBatchSize = "SINK_INFLUX_BATCH_SIZE" +) + +// Kafka-related constants +const ( + SourceKafkaConsumerConfigAutoCommitEnable = "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_COMMIT_ENABLE" + SourceKafkaConsumerConfigAutoOffsetReset = "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_OFFSET_RESET" + SourceKafkaConsumerConfigBootstrapServers = "SOURCE_KAFKA_CONSUMER_CONFIG_BOOTSTRAP_SERVERS" + keySinkKafkaBrokers = "SINK_KAFKA_BROKERS" + keySinkKafkaStream = "SINK_KAFKA_STREAM" + keySinkKafkaProtoMsg = "SINK_KAFKA_PROTO_MESSAGE" + keySinkKafkaTopic = "SINK_KAFKA_TOPIC" + keySinkKafkaKey = "SINK_KAFKA_PROTO_KEY" + keySinkKafkaLingerMs = "SINK_KAFKA_LINGER_MS" +) + +// Sink types +const ( + SinkTypeInflux = "INFLUX" + SinkTypeKafka = "KAFKA" + SinkTypeBigquery = "BIGQUERY" +) + +// BigQuery-related constants +const ( + keySinkBigqueryGoogleCloudProjectID = "SINK_BIGQUERY_GOOGLE_CLOUD_PROJECT_ID" + keySinkBigqueryDatasetName = "SINK_BIGQUERY_DATASET_NAME" + keySinkBigqueryTableName = "SINK_BIGQUERY_TABLE_NAME" + keySinkBigqueryDatasetLabels = "SINK_BIGQUERY_DATASET_LABELS" + keySinkBigqueryTableLabels = "SINK_BIGQUERY_TABLE_LABELS" + keySinkBigqueryTablePartitioningEnable = "SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE" + keySinkBigqueryTableClusteringEnable = "SINK_BIGQUERY_TABLE_CLUSTERING_ENABLE" + keySinkBigqueryBatchSize = "SINK_BIGQUERY_BATCH_SIZE" + keySinkBigqueryTablePartitionKey = "SINK_BIGQUERY_TABLE_PARTITION_KEY" + keySinkBigqueryRowInsertIDEnable = "SINK_BIGQUERY_ROW_INSERT_ID_ENABLE" + keySinkBigqueryClientReadTimeoutMs = "SINK_BIGQUERY_CLIENT_READ_TIMEOUT_MS" + keySinkBigqueryClientConnectTimeoutMs = "SINK_BIGQUERY_CLIENT_CONNECT_TIMEOUT_MS" + keySinkBigqueryTablePartitionExpiryMs = "SINK_BIGQUERY_TABLE_PARTITION_EXPIRY_MS" + keySinkBigqueryDatasetLocation = "SINK_BIGQUERY_DATASET_LOCATION" + keySinkErrorTypesForFailure = "SINK_ERROR_TYPES_FOR_FAILURE" + 
keySinkBigqueryTableClusteringKeys = "SINK_BIGQUERY_TABLE_CLUSTERING_KEYS" + keySinkConnectorSchemaProtoMessageClass = "SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS" + keySinkKafkaProduceLargeMessageEnable = "SINK_KAFKA_PRODUCE_LARGE_MESSAGE_ENABLE" + keySinkBigqueryCredentialPath = "SINK_BIGQUERY_CREDENTIAL_PATH" +) + +var ( + //go:embed schema/config.json + configSchemaRaw []byte + + validateConfig = validator.FromJSONSchema(configSchemaRaw) +) + +type SchemaRegistryStencilURLsParams struct { + SchemaRegistryStencilURLs string `json:"schema_registry_stencil_urls"` +} + +type UsageSpec struct { + CPU string `json:"cpu,omitempty" validate:"required"` + Memory string `json:"memory,omitempty" validate:"required"` +} + +type Resources struct { + TaskManager UsageSpec `json:"taskmanager,omitempty"` + JobManager UsageSpec `json:"jobmanager,omitempty"` +} + +type Config struct { + Resources Resources `json:"resources,omitempty"` + Source []Source `json:"source,omitempty"` + Sink Sink `json:"sink,omitempty"` + EnvVariables map[string]string `json:"env_variables,omitempty"` + Replicas int `json:"replicas"` + SinkType string `json:"sink_type"` + Team string `json:"team"` + FlinkName string `json:"flink_name,omitempty"` + DeploymentID string `json:"deployment_id,omitempty"` + Savepoint any `json:"savepoint,omitempty"` + ChartValues *ChartValues `json:"chart_values,omitempty"` + Deleted bool `json:"deleted,omitempty"` + Namespace string `json:"namespace,omitempty"` + PrometheusURL string `json:"prometheus_url,omitempty"` + JarURI string `json:"jar_uri,omitempty"` + State string `json:"state"` + JobState string `json:"job_state"` + ResetOffset string `json:"reset_offset"` + StopTime *time.Time `json:"stop_time,omitempty"` + DaggerCheckpointURL string `json:"dagger_checkpoint_url,omitempty"` + DaggerSavepointURL string `json:"dagger_savepoint_url,omitempty"` + DaggerK8sHAURL string `json:"dagger_k8s_ha_url,omitempty"` + CloudProvider string `json:"cloud_provider,omitempty"` +} + +type ChartValues struct { + ImageRepository string `json:"image_repository" validate:"required"` + ImageTag string `json:"image_tag" validate:"required"` + ChartVersion string `json:"chart_version" validate:"required"` + ImagePullPolicy string `json:"image_pull_policy"` +} + +type SourceDetail struct { + SourceName string `json:"SOURCE_NAME"` + SourceType string `json:"SOURCE_TYPE"` +} + +type SourceKafka struct { + SourceKafkaConsumerConfigAutoCommitEnable string `json:"SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_COMMIT_ENABLE"` + SourceKafkaConsumerConfigAutoOffsetReset string `json:"SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_OFFSET_RESET"` + SourceKafkaTopicNames string `json:"SOURCE_KAFKA_TOPIC_NAMES"` + SourceKafkaName string `json:"SOURCE_KAFKA_NAME"` + SourceKafkaConsumerConfigGroupID string `json:"SOURCE_KAFKA_CONSUMER_CONFIG_GROUP_ID"` + SourceKafkaConsumerConfigBootstrapServers string `json:"SOURCE_KAFKA_CONSUMER_CONFIG_BOOTSTRAP_SERVERS"` +} + +type SourceParquet struct { + SourceParquetFileDateRange interface{} `json:"SOURCE_PARQUET_FILE_DATE_RANGE"` + SourceParquetFilePaths []string `json:"SOURCE_PARQUET_FILE_PATHS"` +} + +type Source struct { + InputSchemaProtoClass string `json:"INPUT_SCHEMA_PROTO_CLASS"` + InputSchemaEventTimestampFieldIndex string `json:"INPUT_SCHEMA_EVENT_TIMESTAMP_FIELD_INDEX"` + SourceDetails []SourceDetail `json:"SOURCE_DETAILS"` + InputSchemaTable string `json:"INPUT_SCHEMA_TABLE"` + SourceKafka + SourceParquet +} + +type SinkKafka struct { + SinkKafkaBrokers string `json:"SINK_KAFKA_BROKERS"` + 
SinkKafkaStream string `json:"SINK_KAFKA_STREAM"` + SinkKafkaTopic string `json:"SINK_KAFKA_TOPIC"` + SinkKafkaProtoMsg string `json:"SINK_KAFKA_PROTO_MESSAGE"` + SinkKafkaLingerMs string `json:"SINK_KAFKA_LINGER_MS"` + SinkKafkaProtoKey string `json:"SINK_KAFKA_PROTO_KEY"` + SinkKafkaProduceLargeMessageEnable string `json:"SINK_KAFKA_PRODUCE_LARGE_MESSAGE_ENABLE"` +} + +type SinkInflux struct { + SinkInfluxBatchSize string `json:"SINK_INFLUX_BATCH_SIZE,omitempty" source:"entropy"` + SinkInfluxDBName string `json:"SINK_INFLUX_DB_NAME,omitempty" source:"dex"` + SinkInfluxFlushDurationMs string `json:"SINK_INFLUX_FLUSH_DURATION_MS,omitempty" source:"entropy"` + SinkInfluxPassword string `json:"SINK_INFLUX_PASSWORD,omitempty" source:"entropy"` + SinkInfluxRetentionPolicy string `json:"SINK_INFLUX_RETENTION_POLICY,omitempty" source:"entropy"` + SinkInfluxURL string `json:"SINK_INFLUX_URL,omitempty" source:"entropy"` + SinkInfluxUsername string `json:"SINK_INFLUX_USERNAME,omitempty" source:"entropy"` + SinkInfluxMeasurementName string `json:"SINK_INFLUX_MEASUREMENT_NAME"` +} + +type SinkBigquery struct { + SinkBigqueryTablePartitionExpiryMs string `json:"SINK_BIGQUERY_TABLE_PARTITION_EXPIRY_MS"` + SinkBigqueryRowInsertIDEnable string `json:"SINK_BIGQUERY_ROW_INSERT_ID_ENABLE"` + SinkBigqueryClientReadTimeoutMs string `json:"SINK_BIGQUERY_CLIENT_READ_TIMEOUT_MS"` + SinkBigqueryClientConnectTimeoutMs string `json:"SINK_BIGQUERY_CLIENT_CONNECT_TIMEOUT_MS"` + SinkBigqueryCredentialPath string `json:"SINK_BIGQUERY_CREDENTIAL_PATH"` + + SinkBigqueryGoogleCloudProjectID string `json:"SINK_BIGQUERY_GOOGLE_CLOUD_PROJECT_ID"` + SinkBigqueryTableName string `json:"SINK_BIGQUERY_TABLE_NAME"` + SinkBigqueryDatasetLabels string `json:"SINK_BIGQUERY_DATASET_LABELS"` + SinkBigqueryTableLabels string `json:"SINK_BIGQUERY_TABLE_LABELS"` + SinkBigqueryDatasetName string `json:"SINK_BIGQUERY_DATASET_NAME"` + SinkBigqueryTablePartitioningEnable string `json:"SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE"` + SinkBigqueryTablePartitionKey string `json:"SINK_BIGQUERY_TABLE_PARTITION_KEY"` + SinkBigqueryDatasetLocation string `json:"SINK_BIGQUERY_DATASET_LOCATION"` + SinkBigqueryBatchSize string `json:"SINK_BIGQUERY_BATCH_SIZE"` + SinkBigqueryTableClusteringEnable string `json:"SINK_BIGQUERY_TABLE_CLUSTERING_ENABLE"` + SinkBigqueryTableClusteringKeys string `json:"SINK_BIGQUERY_TABLE_CLUSTERING_KEYS"` + SinkErrorTypesForFailure string `json:"SINK_ERROR_TYPES_FOR_FAILURE"` + SinkConnectorSchemaProtoMessageClass string `json:"SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS"` +} + +type Sink struct { + SinkKafka + SinkInflux + SinkBigquery +} + +func readConfig(r module.ExpandedResource, confJSON json.RawMessage, dc driverConf) (*Config, error) { + var cfg Config + err := json.Unmarshal(confJSON, &cfg) + if err != nil { + return nil, errors.ErrInvalid.WithMsgf("invalid config json").WithCausef(err.Error()) + } + + //transformation #6 + // note: enforce the kubernetes deployment name length limit. 
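// A hypothetical standalone restatement of the deployment_id rule applied just below:
// derive a name within the Helm release-name limit when none is given, otherwise reject
// anything longer than helmReleaseNameMaxLength. The helper name is illustrative only.
package example

import (
	"fmt"

	"github.com/goto/entropy/modules"
)

const helmReleaseNameMaxLength = 45

func resolveDeploymentID(given, name, project string) (string, error) {
	if given == "" {
		return modules.BuildResourceName("dagger", name, project, helmReleaseNameMaxLength), nil
	}
	if len(given) > helmReleaseNameMaxLength {
		return "", fmt.Errorf("deployment_id must not have more than %d chars", helmReleaseNameMaxLength)
	}
	return given, nil
}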
+ if len(cfg.DeploymentID) == 0 { + cfg.DeploymentID = modules.BuildResourceName("dagger", r.Name, r.Project, helmReleaseNameMaxLength) + } else if len(cfg.DeploymentID) > helmReleaseNameMaxLength { + return nil, errors.ErrInvalid.WithMsgf("deployment_id must not have more than 53 chars") + } + + //transformation #9 and #11 + //transformation #1 + source := cfg.Source + + if !(len(source[0].SourceParquet.SourceParquetFilePaths) > 0) { + maxConsumerGroupIDSuffix := "0000" + for i := range source { + _, number := splitNameAndNumber(source[i].SourceKafkaConsumerConfigGroupID) + if number > maxConsumerGroupIDSuffix { + maxConsumerGroupIDSuffix = number + } + } + + numberOffset := 0 + + for i := range source { + //TODO: check how to handle increment group id on update + if source[i].SourceKafkaConsumerConfigGroupID == "" { + numberOffset += 1 + source[i].SourceKafkaConsumerConfigGroupID = incrementGroupId(cfg.DeploymentID+"-"+maxConsumerGroupIDSuffix, numberOffset) + } + if source[i].SourceKafkaConsumerConfigAutoCommitEnable == "" { + source[i].SourceKafkaConsumerConfigAutoCommitEnable = dc.EnvVariables[SourceKafkaConsumerConfigAutoCommitEnable] + } + if source[i].SourceKafkaConsumerConfigAutoOffsetReset == "" { + source[i].SourceKafkaConsumerConfigAutoOffsetReset = dc.EnvVariables[SourceKafkaConsumerConfigAutoOffsetReset] + } + if source[i].SourceKafkaConsumerConfigBootstrapServers == "" { + source[i].SourceKafkaConsumerConfigBootstrapServers = dc.EnvVariables[SourceKafkaConsumerConfigBootstrapServers] + } + } + } + + cfg.Source = source + //transformation #3 + var flinkOut flink.Output + if err := json.Unmarshal(r.Dependencies[keyFlinkDependency].Output, &flinkOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid flink state").WithCausef(err.Error()) + } + + //transformation #13 + cfg.EnvVariables[keySinkType] = cfg.SinkType + if cfg.SinkType == SinkTypeKafka { + if cfg.Sink.SinkKafka.SinkKafkaProduceLargeMessageEnable == "" { + cfg.Sink.SinkKafka.SinkKafkaProduceLargeMessageEnable = dc.EnvVariables[keySinkKafkaProduceLargeMessageEnable] + } + + cfg.EnvVariables[keySinkKafkaStream] = cfg.Sink.SinkKafka.SinkKafkaStream + cfg.EnvVariables[keySinkKafkaBrokers] = cfg.Sink.SinkKafka.SinkKafkaBrokers + cfg.EnvVariables[keySinkKafkaProtoMsg] = cfg.Sink.SinkKafka.SinkKafkaProtoMsg + cfg.EnvVariables[keySinkKafkaTopic] = cfg.Sink.SinkKafka.SinkKafkaTopic + cfg.EnvVariables[keySinkKafkaKey] = cfg.Sink.SinkKafka.SinkKafkaProtoKey + cfg.EnvVariables[keySinkKafkaLingerMs] = cfg.Sink.SinkKafka.SinkKafkaLingerMs + } else if cfg.SinkType == SinkTypeInflux { + if cfg.Sink.SinkInflux.SinkInfluxPassword == "" { + cfg.Sink.SinkInflux.SinkInfluxPassword = flinkOut.Influx.Password + } + + if cfg.Sink.SinkInflux.SinkInfluxURL == "" { + cfg.Sink.SinkInflux.SinkInfluxURL = flinkOut.Influx.URL + } + + if cfg.Sink.SinkInflux.SinkInfluxUsername == "" { + cfg.Sink.SinkInflux.SinkInfluxUsername = flinkOut.Influx.Username + } + + if cfg.Sink.SinkInflux.SinkInfluxDBName == "" { + cfg.Sink.SinkInflux.SinkInfluxDBName = flinkOut.Influx.DatabaseName + } + + if cfg.Sink.SinkInflux.SinkInfluxFlushDurationMs == "" { + cfg.Sink.SinkInflux.SinkInfluxFlushDurationMs = dc.EnvVariables[keySinkInfluxFlushDurationMs] + } + + if cfg.Sink.SinkInflux.SinkInfluxRetentionPolicy == "" { + cfg.Sink.SinkInflux.SinkInfluxRetentionPolicy = dc.EnvVariables[keySinkInfluxRetentionPolicy] + } + + if cfg.Sink.SinkInflux.SinkInfluxBatchSize == "" { + cfg.Sink.SinkInflux.SinkInfluxBatchSize = dc.EnvVariables[keySinkInfluxBatchSize] + } + 
+ cfg.EnvVariables[keySinkInfluxPassword] = cfg.Sink.SinkInflux.SinkInfluxPassword + cfg.EnvVariables[keySinkInfluxURL] = cfg.Sink.SinkInflux.SinkInfluxURL + cfg.EnvVariables[keySinkInfluxUsername] = cfg.Sink.SinkInflux.SinkInfluxUsername + + cfg.EnvVariables[keySinkInfluxFlushDurationMs] = cfg.Sink.SinkInflux.SinkInfluxFlushDurationMs + cfg.EnvVariables[keySinkInfluxRetentionPolicy] = cfg.Sink.SinkInflux.SinkInfluxRetentionPolicy + cfg.EnvVariables[keySinkInfluxBatchSize] = cfg.Sink.SinkInflux.SinkInfluxBatchSize + + cfg.EnvVariables[keySinkInfluxDBName] = cfg.Sink.SinkInflux.SinkInfluxDBName + cfg.EnvVariables[keySinkInfluxMeasurementName] = cfg.Sink.SinkInflux.SinkInfluxMeasurementName + } else if cfg.SinkType == SinkTypeBigquery { + if cfg.Sink.SinkBigquery.SinkBigqueryRowInsertIDEnable == "" { + cfg.Sink.SinkBigquery.SinkBigqueryRowInsertIDEnable = dc.EnvVariables[keySinkBigqueryRowInsertIDEnable] + } + if cfg.Sink.SinkBigquery.SinkBigqueryClientReadTimeoutMs == "" { + cfg.Sink.SinkBigquery.SinkBigqueryClientReadTimeoutMs = dc.EnvVariables[keySinkBigqueryClientReadTimeoutMs] + } + if cfg.Sink.SinkBigquery.SinkBigqueryClientConnectTimeoutMs == "" { + cfg.Sink.SinkBigquery.SinkBigqueryClientConnectTimeoutMs = dc.EnvVariables[keySinkBigqueryClientConnectTimeoutMs] + } + if cfg.Sink.SinkBigquery.SinkBigqueryTablePartitionExpiryMs == "" { + cfg.Sink.SinkBigquery.SinkBigqueryTablePartitionExpiryMs = dc.EnvVariables[keySinkBigqueryTablePartitionExpiryMs] + } + if cfg.Sink.SinkBigquery.SinkBigqueryCredentialPath == "" { + cfg.Sink.SinkBigquery.SinkBigqueryCredentialPath = dc.EnvVariables[keySinkBigqueryCredentialPath] + } + + cfg.EnvVariables[keySinkBigqueryRowInsertIDEnable] = cfg.Sink.SinkBigquery.SinkBigqueryRowInsertIDEnable + cfg.EnvVariables[keySinkBigqueryClientReadTimeoutMs] = cfg.Sink.SinkBigquery.SinkBigqueryClientReadTimeoutMs + cfg.EnvVariables[keySinkBigqueryClientConnectTimeoutMs] = cfg.Sink.SinkBigquery.SinkBigqueryClientConnectTimeoutMs + cfg.EnvVariables[keySinkBigqueryTablePartitionExpiryMs] = cfg.Sink.SinkBigquery.SinkBigqueryTablePartitionExpiryMs + cfg.EnvVariables[keySinkBigqueryCredentialPath] = cfg.Sink.SinkBigquery.SinkBigqueryCredentialPath + + cfg.EnvVariables[keySinkBigqueryGoogleCloudProjectID] = cfg.Sink.SinkBigquery.SinkBigqueryGoogleCloudProjectID + cfg.EnvVariables[keySinkBigqueryDatasetName] = cfg.Sink.SinkBigquery.SinkBigqueryDatasetName + cfg.EnvVariables[keySinkBigqueryTableName] = cfg.Sink.SinkBigquery.SinkBigqueryTableName + cfg.EnvVariables[keySinkBigqueryDatasetLabels] = cfg.Sink.SinkBigquery.SinkBigqueryDatasetLabels + cfg.EnvVariables[keySinkBigqueryTableLabels] = cfg.Sink.SinkBigquery.SinkBigqueryTableLabels + cfg.EnvVariables[keySinkBigqueryTablePartitioningEnable] = cfg.Sink.SinkBigquery.SinkBigqueryTablePartitioningEnable + cfg.EnvVariables[keySinkBigqueryTablePartitionKey] = cfg.Sink.SinkBigquery.SinkBigqueryTablePartitionKey + cfg.EnvVariables[keySinkBigqueryDatasetLocation] = cfg.Sink.SinkBigquery.SinkBigqueryDatasetLocation + cfg.EnvVariables[keySinkBigqueryBatchSize] = cfg.Sink.SinkBigquery.SinkBigqueryBatchSize + cfg.EnvVariables[keySinkBigqueryTableClusteringEnable] = cfg.Sink.SinkBigquery.SinkBigqueryTableClusteringEnable + cfg.EnvVariables[keySinkBigqueryTableClusteringKeys] = cfg.Sink.SinkBigquery.SinkBigqueryTableClusteringKeys + cfg.EnvVariables[keySinkErrorTypesForFailure] = cfg.Sink.SinkBigquery.SinkErrorTypesForFailure + cfg.EnvVariables[keySinkConnectorSchemaProtoMessageClass] = 
cfg.Sink.SinkBigquery.SinkConnectorSchemaProtoMessageClass + } + + //transformation #2 + cfg.EnvVariables = modules.CloneAndMergeMaps(dc.EnvVariables, cfg.EnvVariables) + + cfg.Namespace = flinkOut.KubeNamespace + + //transformation #4 + //transform resource name to safe length + + //transformation #5 + //TODO: build name from title as project--dagger + cfg.EnvVariables[keyFlinkJobID] = modules.BuildResourceName("dagger", r.Name, r.Project, math.MaxInt) + + //transformation #7 + cfg.EnvVariables[keySinkInfluxURL] = flinkOut.Influx.URL + cfg.EnvVariables[keySinkInfluxUsername] = flinkOut.Influx.Username + + delete(cfg.EnvVariables, SourceKafkaConsumerConfigAutoOffsetReset) + delete(cfg.EnvVariables, SourceKafkaConsumerConfigAutoCommitEnable) + + //SINK_INFLUX_DB_NAME is added by client + //SINK_INFLUX_MEASUREMENT_NAME is added by client + //REDIS_SERVER is skipped + + //transformation #8 + //Longbow configs would be in base configs + + //transformation #10 + //this shall check if the project of the conf.EnvVars.STREAMS is same as that of the corresponding flink + //do we need to check this? + + //transformation #14 + cfg.Resources = mergeResources(dc.Resources, cfg.Resources) + + cfg.PrometheusURL = flinkOut.PrometheusURL + cfg.FlinkName = flinkOut.FlinkName + + if cfg.Replicas <= 0 { + cfg.Replicas = 1 + } + + if err := validateConfig(confJSON); err != nil { + return nil, err + } + + cfg.DaggerCheckpointURL = dc.DaggerCheckpointURL + cfg.DaggerK8sHAURL = dc.DaggerK8sHAURL + cfg.DaggerSavepointURL = dc.DaggerSavepointURL + + cfg.CloudProvider = dc.CloudProvider + if cfg.CloudProvider == "" { + cfg.CloudProvider = "gcp" + } + + return &cfg, nil +} + +func incrementGroupId(groupId string, step int) string { + incrementNumberInString := func(number string) int { + num, _ := strconv.Atoi(number) + return num + step + } + + leftZeroPad := func(number int) string { + return fmt.Sprintf("%04d", number) + } + + name, number := splitNameAndNumber(groupId) + + updatedNumber := leftZeroPad(incrementNumberInString(number)) + return strings.Join(append(name, updatedNumber), "-") +} + +func splitNameAndNumber(groupId string) ([]string, string) { + getLastAndRestFromArray := func(arr []string) ([]string, string) { + return arr[:len(arr)-1], arr[len(arr)-1] + } + + parts := strings.Split(groupId, "-") + return getLastAndRestFromArray(parts) +} + +func mustMarshalJSON(v interface{}) []byte { + data, err := json.Marshal(v) + if err != nil { + panic(fmt.Sprintf("failed to marshal JSON: %v", err)) + } + return data +} + +func mergeResources(oldResources, newResources Resources) Resources { + if newResources.TaskManager.CPU == "" { + newResources.TaskManager.CPU = oldResources.TaskManager.CPU + } + if newResources.TaskManager.Memory == "" { + newResources.TaskManager.Memory = oldResources.TaskManager.Memory + } + if newResources.JobManager.CPU == "" { + newResources.JobManager.CPU = oldResources.JobManager.CPU + } + if newResources.JobManager.Memory == "" { + newResources.JobManager.Memory = oldResources.JobManager.Memory + } + return newResources +} diff --git a/modules/dagger/driver.go b/modules/dagger/driver.go new file mode 100644 index 00000000..42b8baa8 --- /dev/null +++ b/modules/dagger/driver.go @@ -0,0 +1,272 @@ +package dagger + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "html/template" + "math" + "strings" + "time" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + 
"github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/helm" + "github.com/goto/entropy/pkg/kube" +) + +const ( + stepReleaseCreate = "release_create" + stepReleaseUpdate = "release_update" + stepReleaseStop = "release_stop" + stepKafkaReset = "kafka_reset" +) + +const ( + chartRepo = "https://goto.github.io/charts/" + chartName = "dagger-deployment-chart" + imageRepo = "gotocompany/dagger" +) + +const ( + labelsConfKey = "extra_labels" + + labelDeployment = "deployment" + labelOrchestrator = "orchestrator" + labelURN = "urn" + labelName = "name" + labelNamespace = "namespace" + labelJobState = "job_state" + labelState = "state" + + orchestratorLabelValue = "entropy" +) + +const defaultKey = "default" + +var defaultDriverConf = driverConf{ + Namespace: map[string]string{ + defaultKey: "dagger", + }, + ChartValues: ChartValues{ + ChartVersion: "0.1.0", + }, +} + +type daggerDriver struct { + timeNow func() time.Time + conf driverConf + kubeDeploy kubeDeployFn + kubeGetPod kubeGetPodFn + kubeGetCRD kubeGetCRDFn + consumerReset consumerResetFn +} + +type ( + kubeDeployFn func(ctx context.Context, isCreate bool, conf kube.Config, hc helm.ReleaseConfig) error + kubeGetPodFn func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) + kubeGetCRDFn func(ctx context.Context, conf kube.Config, ns string, name string) (kube.FlinkDeploymentStatus, error) + consumerResetFn func(ctx context.Context, conf Config, resetTo string) []Source +) + +type driverConf struct { + // Labels to be injected to the chart during deployment. Values can be Go templates. + Labels map[string]string `json:"labels,omitempty"` + + // Namespace is the kubernetes namespace where firehoses will be deployed. + Namespace map[string]string `json:"namespace" validate:"required"` + + // ChartValues is the chart and image version information. 
+ ChartValues ChartValues `json:"chart_values" validate:"required"` + + EnvVariables map[string]string `json:"env_variables,omitempty"` + + Resources Resources `json:"resources" validate:"required"` + + JarURI string `json:"jar_uri" validate:"required"` + + DaggerCheckpointURL string `json:"dagger_checkpoint_url"` + + DaggerSavepointURL string `json:"dagger_savepoint_url"` + + DaggerK8sHAURL string `json:"dagger_k8s_ha_url"` + + CloudProvider string `json:"cloud_provider"` + + // timeout value for a kube deployment run + KubeDeployTimeout int `json:"kube_deploy_timeout_seconds"` +} + +type Output struct { + JMDeployStatus string `json:"jm_deploy_status,omitempty"` + JobStatus string `json:"job_status,omitempty"` + State string `json:"state,omitempty"` + Reconcilation string `json:"reconcilation,omitempty"` + Pods []kube.Pod `json:"pods,omitempty"` + Namespace string `json:"namespace,omitempty"` + JobID string `json:"job_id,omitempty"` + Error string `json:"error,omitempty"` +} + +type transientData struct { + PendingSteps []string `json:"pending_steps"` + ResetOffsetTo string `json:"reset_offset_to,omitempty"` +} + +func mergeChartValues(cur, newVal *ChartValues) *ChartValues { + if newVal == nil { + return cur + } + + merged := cur + + if newVal.ChartVersion != "" { + merged.ChartVersion = newVal.ChartVersion + } + if newVal.ImageRepository != "" { + merged.ImageRepository = newVal.ImageRepository + } + if newVal.ImageTag != "" { + merged.ImageTag = newVal.ImageTag + } + if newVal.ImagePullPolicy != "" { + merged.ImagePullPolicy = newVal.ImagePullPolicy + } + + return merged +} + +func readOutputData(exr module.ExpandedResource) (*Output, error) { + var curOut Output + if len(exr.Resource.State.Output) == 0 { + return &curOut, nil + } + if err := json.Unmarshal(exr.Resource.State.Output, &curOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("corrupted output").WithCausef(err.Error()) + } + return &curOut, nil +} + +func readTransientData(exr module.ExpandedResource) (*transientData, error) { + if len(exr.Resource.State.ModuleData) == 0 { + return &transientData{}, nil + } + + var modData transientData + if err := json.Unmarshal(exr.Resource.State.ModuleData, &modData); err != nil { + return nil, errors.ErrInternal.WithMsgf("corrupted transient data").WithCausef(err.Error()) + } + return &modData, nil +} + +func (dd *daggerDriver) getHelmRelease(res resource.Resource, conf Config, + kubeOut kubernetes.Output, +) (*helm.ReleaseConfig, error) { + + entropyLabels := map[string]string{ + labelDeployment: conf.DeploymentID, + labelOrchestrator: orchestratorLabelValue, + } + + otherLabels := map[string]string{ + labelURN: res.URN, + labelName: res.Name, + labelNamespace: conf.Namespace, + } + + deploymentLabels, err := renderTpl(dd.conf.Labels, modules.CloneAndMergeMaps(res.Labels, modules.CloneAndMergeMaps(entropyLabels, otherLabels))) + if err != nil { + return nil, err + } + + rc := helm.DefaultReleaseConfig() + rc.Timeout = dd.conf.KubeDeployTimeout + rc.Name = conf.DeploymentID + rc.Repository = chartRepo + rc.Chart = chartName + rc.Namespace = conf.Namespace + rc.ForceUpdate = true + rc.Version = conf.ChartValues.ChartVersion + + imageRepository := dd.conf.ChartValues.ImageRepository + if conf.ChartValues.ImageRepository != "" { + imageRepository = conf.ChartValues.ImageRepository + } + + conf.EnvVariables["SINK_TYPE"] = strings.ToLower(conf.EnvVariables["SINK_TYPE"]) + + var programArgs []string + for key, value := range conf.EnvVariables { + programArgs = append(programArgs, 
fmt.Sprintf("%q", "--"+key), fmt.Sprintf("%q", value)) + } + + formatted := fmt.Sprintf("[%s]", strings.Join(programArgs, ",")) + encodedProgramArgs := base64.StdEncoding.EncodeToString([]byte(formatted)) + + rc.Values = map[string]any{ + labelsConfKey: modules.CloneAndMergeMaps(deploymentLabels, entropyLabels), + "image": imageRepository, + "deployment_id": conf.DeploymentID, + "configuration": map[string]any{ + "FLINK_PARALLELISM": conf.Replicas, + }, + "projectID": res.Project, + "name": modules.BuildResourceName("dagger", res.Name, res.Project, math.MaxInt), + "team": conf.Team, + "flink_name": conf.FlinkName, + "prometheus_url": conf.PrometheusURL, + "resources": map[string]any{ + "jobmanager": map[string]any{ + "cpu": conf.Resources.JobManager.CPU, + "memory": conf.Resources.JobManager.Memory, + }, + "taskmanager": map[string]any{ + "cpu": conf.Resources.TaskManager.CPU, + "memory": conf.Resources.TaskManager.Memory, + }, + }, + "jarURI": conf.JarURI, + "programArgs": append([]string{"--encodedArgs"}, encodedProgramArgs), + "state": conf.JobState, + "namespace": conf.Namespace, + "urn": res.URN, + "dagger_checkpoint_url": conf.DaggerCheckpointURL, + "dagger_savepoint_url": conf.DaggerSavepointURL, + "dagger_k8s_ha_url": conf.DaggerK8sHAURL, + "cloud_provider": conf.CloudProvider, + } + + return rc, nil +} + +// TODO: move this to pkg +func renderTpl(labelsTpl map[string]string, labelsValues map[string]string) (map[string]string, error) { + const useZeroValueForMissingKey = "missingkey=zero" + + finalLabels := map[string]string{} + for k, v := range labelsTpl { + var buf bytes.Buffer + t, err := template.New("").Option(useZeroValueForMissingKey).Parse(v) + if err != nil { + return nil, errors.ErrInvalid. + WithMsgf("label template for '%s' is invalid", k).WithCausef(err.Error()) + } else if err := t.Execute(&buf, labelsValues); err != nil { + return nil, errors.ErrInvalid. 
+ WithMsgf("failed to render label template").WithCausef(err.Error()) + } + + // allow empty values + // labelVal := strings.TrimSpace(buf.String()) + // if labelVal == "" { + // continue + // } + + finalLabels[k] = buf.String() + } + return finalLabels, nil +} diff --git a/modules/dagger/driver_log.go b/modules/dagger/driver_log.go new file mode 100644 index 00000000..cfa3ab57 --- /dev/null +++ b/modules/dagger/driver_log.go @@ -0,0 +1,58 @@ +package dagger + +import ( + "context" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules/flink" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" +) + +const container = "flink-main-container" + +func (dd *daggerDriver) Log(ctx context.Context, res module.ExpandedResource, filter map[string]string) (<-chan module.LogChunk, error) { + conf, err := readConfig(res, res.Spec.Configs, dd.conf) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + if filter == nil { + filter = map[string]string{} + } + filter["app"] = conf.DeploymentID + filter["container"] = container + + var flinkOut flink.Output + if err := json.Unmarshal(res.Dependencies[keyFlinkDependency].Output, &flinkOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid flink state").WithCausef(err.Error()) + } + kubeCl, err := kube.NewClient(ctx, flinkOut.KubeCluster.Configs) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("failed to create new kube client on firehose driver Log").WithCausef(err.Error()) + } + + logs, err := kubeCl.StreamLogs(ctx, conf.Namespace, filter) + if err != nil { + return nil, err + } + + mappedLogs := make(chan module.LogChunk) + go func() { + defer close(mappedLogs) + for { + select { + case log, ok := <-logs: + if !ok { + return + } + mappedLogs <- module.LogChunk{Data: log.Data, Labels: log.Labels} + case <-ctx.Done(): + return + } + } + }() + + return mappedLogs, err +} diff --git a/modules/dagger/driver_output.go b/modules/dagger/driver_output.go new file mode 100644 index 00000000..c6f17af8 --- /dev/null +++ b/modules/dagger/driver_output.go @@ -0,0 +1,83 @@ +package dagger + +import ( + "context" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/flink" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" +) + +func (dd *daggerDriver) Output(ctx context.Context, exr module.ExpandedResource) (json.RawMessage, error) { + output, err := readOutputData(exr) + if err != nil { + return nil, err + } + + conf, err := readConfig(exr, exr.Spec.Configs, dd.conf) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + var flinkOut flink.Output + if err := json.Unmarshal(exr.Dependencies[keyFlinkDependency].Output, &flinkOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid kube state").WithCausef(err.Error()) + } + + return dd.refreshOutput(ctx, exr.Resource, *conf, *output, flinkOut.KubeCluster) +} + +func (dd *daggerDriver) refreshOutput(ctx context.Context, r resource.Resource, + conf Config, output Output, kubeOut kubernetes.Output, +) (json.RawMessage, error) { + rc, err := dd.getHelmRelease(r, conf, kubeOut) + if err != nil { + return nil, err + } + + pods, crd, 
err := dd.getKubeResources(ctx, kubeOut.Configs, rc.Namespace, rc.Name, conf.DeploymentID) + if err != nil { + return modules.MustJSON(Output{ + Error: err.Error(), + }), nil + } + + output.Pods = pods + output.Namespace = conf.Namespace + output.JobID = conf.DeploymentID + output.JMDeployStatus = crd.JMDeployStatus + output.JobStatus = crd.JobStatus + output.Reconcilation = crd.Reconciliation + + state := output.JobStatus + if state == "FINISHED" { + state = "CANCELED" + } else if state == "RUNNING" { + state = "RUNNING" + } else { + state = "INITIALIZING" + } + output.State = state + output.Error = "" + + return modules.MustJSON(output), nil +} + +func (dd *daggerDriver) getKubeResources(ctx context.Context, configs kube.Config, namespace, name, deploymentID string) ([]kube.Pod, kube.FlinkDeploymentStatus, error) { + pods, err := dd.kubeGetPod(ctx, configs, namespace, map[string]string{"app": deploymentID}) + if err != nil { + return nil, kube.FlinkDeploymentStatus{}, err + } + + crd, err := dd.kubeGetCRD(ctx, configs, namespace, name) + if err != nil { + return nil, kube.FlinkDeploymentStatus{}, err + } + + return pods, crd, nil +} diff --git a/modules/dagger/driver_plan.go b/modules/dagger/driver_plan.go new file mode 100644 index 00000000..a4127236 --- /dev/null +++ b/modules/dagger/driver_plan.go @@ -0,0 +1,252 @@ +package dagger + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/flink" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kafka" +) + +const SourceKafkaConsumerAutoOffsetReset = "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_OFFSET_RESET" +const ( + JobStateRunning = "running" + JobStateSuspended = "suspended" + StateDeployed = "DEPLOYED" + StateUserStopped = "USER_STOPPED" + StateSystemStopped = "SYSTEM_STOPPED" + KeySchemaRegistryStencilCacheAutoRefresh = "SCHEMA_REGISTRY_STENCIL_CACHE_AUTO_REFRESH" + KeySchemaRegistryStencilURLs = "SCHEMA_REGISTRY_STENCIL_URLS" +) + +func (dd *daggerDriver) Plan(_ context.Context, exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + switch act.Name { + case module.CreateAction: + return dd.planCreate(exr, act) + + case ResetAction: + return dd.planReset(exr, act) + + default: + return dd.planChange(exr, act) + } +} + +func (dd *daggerDriver) planCreate(exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + conf, err := readConfig(exr, act.Params, dd.conf) + if err != nil { + return nil, err + } + + //transformation #12 + conf.EnvVariables[keyStreams] = string(mustMarshalJSON(conf.Source)) + conf.EnvVariables[keyFlinkParallelism] = fmt.Sprint(conf.Replicas) + + chartVals := mergeChartValues(&dd.conf.ChartValues, conf.ChartValues) + conf.ChartValues = chartVals + + if conf.State == "" { + conf.State = StateDeployed + } + if conf.JobState == "" { + conf.JobState = JobStateRunning + } + + immediately := dd.timeNow() + conf.JarURI = dd.conf.JarURI + exr.Resource.Labels[labelJobState] = conf.JobState + exr.Resource.Labels[labelState] = conf.State + + exr.Resource.Spec.Configs = modules.MustJSON(conf) + + err = dd.validateHelmReleaseConfigs(exr, *conf) + if err != nil { + return nil, err + } + + exr.Resource.State = resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: conf.Namespace, + }), + 
NextSyncAt: &immediately, + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseCreate}, + }), + } + + return &exr.Resource, nil +} + +func (dd *daggerDriver) planChange(exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + curConf, err := readConfig(exr, exr.Resource.Spec.Configs, dd.conf) + if err != nil { + return nil, err + } + + switch act.Name { + case module.UpdateAction: + newConf, err := readConfig(exr, act.Params, dd.conf) + if err != nil { + return nil, err + } + + newConf.Source = mergeConsumerGroupId(curConf.Source, newConf.Source) + newConf.EnvVariables[keyStreams] = string(mustMarshalJSON(newConf.Source)) + newConf.EnvVariables[keyFlinkParallelism] = fmt.Sprint(newConf.Replicas) + + //we want to update these irrespective of the user input + newConf.ChartValues = &dd.conf.ChartValues + newConf.JarURI = dd.conf.JarURI + + chartVals := mergeChartValues(curConf.ChartValues, newConf.ChartValues) + + // restore configs that are not user-controlled. + newConf.DeploymentID = curConf.DeploymentID + newConf.ChartValues = chartVals + + if newConf.State == "" { + newConf.State = curConf.State + } + if newConf.JobState == "" { + newConf.JobState = curConf.JobState + } + + newConf.Resources = mergeResources(curConf.Resources, newConf.Resources) + + curConf = newConf + + case StopAction: + curConf.State = StateUserStopped + curConf.JobState = JobStateSuspended + + case StartAction: + curConf.State = StateDeployed + curConf.JobState = JobStateRunning + curConf.ChartValues = &dd.conf.ChartValues + curConf.JarURI = dd.conf.JarURI + + err := updateStencilSchemaRegistryURLsParams(curConf, act) + if err != nil { + return nil, err + } + } + + immediately := dd.timeNow() + + exr.Resource.Spec.Configs = modules.MustJSON(curConf) + + if act.Labels != nil { + exr.Resource.Labels = act.Labels + } + exr.Resource.Labels[labelJobState] = curConf.JobState + exr.Resource.Labels[labelState] = curConf.State + + err = dd.validateHelmReleaseConfigs(exr, *curConf) + if err != nil { + return nil, err + } + + exr.Resource.State = resource.State{ + Status: resource.StatusPending, + Output: exr.Resource.State.Output, + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseUpdate}, + }), + NextSyncAt: &immediately, + } + + return &exr.Resource, nil +} + +func (dd *daggerDriver) planReset(exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + resetValue, err := kafka.ParseResetV2Params(act.Params) + if err != nil { + return nil, err + } + + immediately := dd.timeNow() + + curConf, err := readConfig(exr, exr.Resource.Spec.Configs, dd.conf) + if err != nil { + return nil, err + } + + err = updateStencilSchemaRegistryURLsParams(curConf, act) + if err != nil { + return nil, err + } + + curConf.ResetOffset = resetValue + + curConf.Source = dd.consumerReset(context.Background(), *curConf, resetValue) + curConf.EnvVariables[keyStreams] = string(mustMarshalJSON(curConf.Source)) + + curConf.ChartValues = &dd.conf.ChartValues + curConf.JarURI = dd.conf.JarURI + + exr.Resource.Spec.Configs = modules.MustJSON(curConf) + exr.Resource.State = resource.State{ + Status: resource.StatusPending, + Output: exr.Resource.State.Output, + NextSyncAt: &immediately, + ModuleData: modules.MustJSON(transientData{ + ResetOffsetTo: resetValue, + PendingSteps: []string{ + stepKafkaReset, + }, + }), + } + return &exr.Resource, nil +} + +func (dd *daggerDriver) validateHelmReleaseConfigs(expandedResource module.ExpandedResource, 
	config Config) error {
+	var flinkOut flink.Output
+	if err := json.Unmarshal(expandedResource.Dependencies[keyFlinkDependency].Output, &flinkOut); err != nil {
+		return errors.ErrInternal.WithMsgf("invalid flink state").WithCausef(err.Error())
+	}
+
+	_, err := dd.getHelmRelease(expandedResource.Resource, config, flinkOut.KubeCluster)
+	return err
+}
+
+func mergeConsumerGroupId(currStreams, newStreams []Source) []Source {
+	if len(currStreams) != len(newStreams) {
+		return newStreams
+	}
+
+	for i := range currStreams {
+		if len(currStreams[i].SourceParquet.SourceParquetFilePaths) > 0 {
+			// parquet source: consumer group IDs do not apply, keep the stream as-is.
+			continue
+		}
+
+		if currStreams[i].SourceKafka.SourceKafkaName == newStreams[i].SourceKafka.SourceKafkaName {
+			newStreams[i].SourceKafka.SourceKafkaConsumerConfigGroupID = currStreams[i].SourceKafka.SourceKafkaConsumerConfigGroupID
+		}
+	}
+
+	return newStreams
+}
+
+func updateStencilSchemaRegistryURLsParams(curConf *Config, act module.ActionRequest) error {
+	if curConf.EnvVariables[KeySchemaRegistryStencilCacheAutoRefresh] == "false" {
+		schemaRegistryStencilURLsParams := SchemaRegistryStencilURLsParams{}
+		err := json.Unmarshal([]byte(act.Params), &schemaRegistryStencilURLsParams)
+		if err != nil {
+			return err
+		}
+
+		if schemaRegistryStencilURLsParams.SchemaRegistryStencilURLs != "" {
+			curConf.EnvVariables[KeySchemaRegistryStencilURLs] = schemaRegistryStencilURLsParams.SchemaRegistryStencilURLs
+		}
+	}
+	return nil
+}
diff --git a/modules/dagger/driver_sync.go b/modules/dagger/driver_sync.go
new file mode 100644
index 00000000..6d89fe6e
--- /dev/null
+++ b/modules/dagger/driver_sync.go
@@ -0,0 +1,98 @@
+package dagger
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/goto/entropy/core/module"
+	"github.com/goto/entropy/core/resource"
+	"github.com/goto/entropy/modules"
+	"github.com/goto/entropy/modules/flink"
+	"github.com/goto/entropy/modules/kubernetes"
+	"github.com/goto/entropy/pkg/errors"
+)
+
+func (dd *daggerDriver) Sync(ctx context.Context, exr module.ExpandedResource) (*resource.State, error) {
+	modData, err := readTransientData(exr)
+	if err != nil {
+		return nil, err
+	}
+
+	out, err := readOutputData(exr)
+	if err != nil {
+		return nil, errors.ErrInternal.WithCausef(err.Error())
+	}
+
+	conf, err := readConfig(exr, exr.Spec.Configs, dd.conf)
+	if err != nil {
+		return nil, errors.ErrInternal.WithCausef(err.Error())
+	}
+
+	var flinkOut flink.Output
+	if err := json.Unmarshal(exr.Dependencies[keyFlinkDependency].Output, &flinkOut); err != nil {
+		return nil, errors.ErrInternal.WithMsgf("invalid flink state").WithCausef(err.Error())
+	}
+
+	finalState := resource.State{
+		Status: resource.StatusPending,
+		Output: exr.Resource.State.Output,
+	}
+
+	if len(modData.PendingSteps) > 0 {
+		pendingStep := modData.PendingSteps[0]
+		modData.PendingSteps = modData.PendingSteps[1:]
+
+		switch pendingStep {
+		case stepReleaseCreate, stepReleaseUpdate, stepReleaseStop, stepKafkaReset:
+			isCreate := pendingStep == stepReleaseCreate
+			if err := dd.releaseSync(ctx, exr.Resource, isCreate, *conf, flinkOut.KubeCluster); err != nil {
+				return nil, err
+			}
+		default:
+			return nil, errors.ErrInternal.WithMsgf("unknown step: '%s'", pendingStep)
+		}
+
+		// we have more pending steps, so enqueue the resource for another sync
+		// as soon as possible.
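+		// The remaining steps stay persisted in ModuleData, so the next Sync run
+		// resumes from exactly where this one stopped.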
+ immediately := dd.timeNow() + finalState.NextSyncAt = &immediately + finalState.ModuleData = modules.MustJSON(modData) + + return &finalState, nil + } + + finalState.NextSyncAt = conf.StopTime + if conf.StopTime != nil && conf.StopTime.Before(dd.timeNow()) { + conf.JobState = JobStateSuspended + conf.State = StateSystemStopped + if err := dd.releaseSync(ctx, exr.Resource, false, *conf, flinkOut.KubeCluster); err != nil { + return nil, err + } + finalState.NextSyncAt = nil + } + + finalOut, err := dd.refreshOutput(ctx, exr.Resource, *conf, *out, flinkOut.KubeCluster) + if err != nil { + return nil, err + } + finalState.Output = finalOut + + finalState.Status = resource.StatusCompleted + finalState.ModuleData = nil + return &finalState, nil + +} + +func (dd *daggerDriver) releaseSync(ctx context.Context, r resource.Resource, + isCreate bool, conf Config, kubeOut kubernetes.Output, +) error { + rc, err := dd.getHelmRelease(r, conf, kubeOut) + if err != nil { + return err + } + + if err := dd.kubeDeploy(ctx, isCreate, kubeOut.Configs, *rc); err != nil { + return errors.ErrInternal.WithCausef(err.Error()) + } + return nil +} diff --git a/modules/dagger/module.go b/modules/dagger/module.go new file mode 100644 index 00000000..df39f956 --- /dev/null +++ b/modules/dagger/module.go @@ -0,0 +1,165 @@ +package dagger + +import ( + "context" + _ "embed" + "encoding/json" + "time" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules/flink" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/helm" + "github.com/goto/entropy/pkg/kube" + "github.com/goto/entropy/pkg/validator" + "helm.sh/helm/v3/pkg/release" + v1 "k8s.io/api/core/v1" +) + +const ( + keyFlinkDependency = "flink" + StopAction = "stop" + StartAction = "start" + ResetAction = "reset" +) + +type FlinkCRDStatus struct { + JobManagerDeploymentStatus string `json:"jobManagerDeploymentStatus"` + JobStatus string `json:"jobStatus"` + ReconciliationStatus string `json:"reconciliationStatus"` +} + +var Module = module.Descriptor{ + Kind: "dagger", + Dependencies: map[string]string{ + keyFlinkDependency: flink.Module.Kind, + }, + Actions: []module.ActionDesc{ + { + Name: module.CreateAction, + Description: "Creates a new dagger", + }, + { + Name: module.UpdateAction, + Description: "Updates an existing dagger", + }, + { + Name: StopAction, + Description: "Suspends a running dagger", + }, + { + Name: StartAction, + Description: "Starts a suspended dagger", + }, + { + Name: ResetAction, + Description: "Resets the offset of a dagger", + }, + }, + DriverFactory: func(confJSON json.RawMessage) (module.Driver, error) { + conf := defaultDriverConf // clone the default value + if err := json.Unmarshal(confJSON, &conf); err != nil { + return nil, err + } else if err := validator.TaggedStruct(conf); err != nil { + return nil, err + } + + return &daggerDriver{ + conf: conf, + timeNow: time.Now, + kubeDeploy: func(_ context.Context, isCreate bool, kubeConf kube.Config, hc helm.ReleaseConfig) error { + canUpdate := func(rel *release.Release) bool { + curLabels, ok := rel.Config[labelsConfKey].(map[string]any) + if !ok { + return false + } + newLabels, ok := hc.Values[labelsConfKey].(map[string]string) + if !ok { + return false + } + + isManagedByEntropy := curLabels[labelOrchestrator] == orchestratorLabelValue + isSameDeployment := curLabels[labelDeployment] == newLabels[labelDeployment] + + return isManagedByEntropy && isSameDeployment + 
			}
+
+				helmCl := helm.NewClient(&helm.Config{Kubernetes: kubeConf})
+				_, errHelm := helmCl.Upsert(&hc, canUpdate)
+				return errHelm
+			},
+			kubeGetPod: func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) {
+				kubeCl, err := kube.NewClient(ctx, conf)
+				if err != nil {
+					return nil, errors.ErrInternal.WithMsgf("failed to create new kube client on dagger driver kube get pod").WithCausef(err.Error())
+				}
+				return kubeCl.GetPodDetails(ctx, ns, labels, func(pod v1.Pod) bool {
+					// allow pods that are in running state and are not marked for deletion
+					return pod.Status.Phase == v1.PodRunning && pod.DeletionTimestamp == nil
+				})
+			},
+			kubeGetCRD: func(ctx context.Context, conf kube.Config, ns string, name string) (kube.FlinkDeploymentStatus, error) {
+				kubeCl, err := kube.NewClient(ctx, conf)
+				if err != nil {
+					return kube.FlinkDeploymentStatus{}, errors.ErrInternal.WithMsgf("failed to create new kube client on dagger driver kube get CRD").WithCausef(err.Error())
+				}
+				crd, err := kubeCl.GetCRDDetails(ctx, ns, name)
+				if err != nil {
+					return kube.FlinkDeploymentStatus{}, err
+				}
+				return parseFlinkCRDStatus(crd.Object)
+			},
+			consumerReset: consumerReset,
+		}, nil
+	},
+}
+
+func parseFlinkCRDStatus(flinkDeployment map[string]interface{}) (kube.FlinkDeploymentStatus, error) {
+	var flinkCRDStatus FlinkCRDStatus
+	statusInterface, ok := flinkDeployment["status"].(map[string]interface{})
+	if !ok {
+		return kube.FlinkDeploymentStatus{}, errors.ErrInternal.WithMsgf("failed to convert flink deployment status to map[string]interface{}")
+	}
+
+	if jmStatus, ok := statusInterface["jobManagerDeploymentStatus"].(string); ok {
+		flinkCRDStatus.JobManagerDeploymentStatus = jmStatus
+	}
+	if jobStatus, ok := statusInterface["jobStatus"].(map[string]interface{}); ok {
+		if st, ok := jobStatus["state"].(string); ok {
+			flinkCRDStatus.JobStatus = st
+		}
+	}
+	if reconciliationStatus, ok := statusInterface["reconciliationStatus"].(map[string]interface{}); ok {
+		if st, ok := reconciliationStatus["state"].(string); ok {
+			flinkCRDStatus.ReconciliationStatus = st
+		}
+	}
+
+	status := kube.FlinkDeploymentStatus{
+		JMDeployStatus: flinkCRDStatus.JobManagerDeploymentStatus,
+		JobStatus:      flinkCRDStatus.JobStatus,
+		Reconciliation: flinkCRDStatus.ReconciliationStatus,
+	}
+	return status, nil
+}
+
+func consumerReset(ctx context.Context, conf Config, resetTo string) []Source {
+	// parquet sources carry no consumer groups to reset; only kafka sources are updated.
+	if len(conf.Source[0].SourceParquet.SourceParquetFilePaths) == 0 {
+		baseGroup := conf.Source[0].SourceKafkaConsumerConfigGroupID
+		for i := range conf.Source {
+			if conf.Source[i].SourceKafkaConsumerConfigGroupID > baseGroup {
+				baseGroup = conf.Source[i].SourceKafkaConsumerConfigGroupID
+			}
+		}
+
+		offset := 0
+
+		for i := range conf.Source {
+			offset++
+			conf.Source[i].SourceKafkaConsumerConfigGroupID = incrementGroupId(baseGroup, offset)
+			conf.Source[i].SourceKafkaConsumerConfigAutoOffsetReset = resetTo
+		}
+	}
+
+	return conf.Source
+}
diff --git a/modules/dagger/schema/config.json b/modules/dagger/schema/config.json
new file mode 100644
index 00000000..3effabd0
--- /dev/null
+++ b/modules/dagger/schema/config.json
@@ -0,0 +1,49 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "$id": "http://json-schema.org/draft-07/schema#",
+  "type": "object",
+  "required": [
+    "replicas",
+    "env_variables",
+    "team",
+    "source",
+    "sink",
+    "sink_type"
+  ],
+  "properties": {
+    "replicas": {
+      "type": "number",
+      "default": 1,
+      "minimum": 1
+    },
+    "deployment_id": {
+      "type": 
"string" + }, + "sink_type": { + "type": "string", + "enum": [ + "INFLUX", + "KAFKA", + "BIGQUERY" + ] + }, + "env_variables": { + "type": "object", + "additionalProperties": true, + "required": [ + "SINK_TYPE" + ], + "properties": { + "SINK_TYPE": { + "type": "string", + "enum": [ + "INFLUX", + "KAFKA", + "BIGQUERY" + ] + } + } + } + } + } + \ No newline at end of file diff --git a/modules/firehose/autoscaler.go b/modules/firehose/autoscaler.go new file mode 100644 index 00000000..297fffc2 --- /dev/null +++ b/modules/firehose/autoscaler.go @@ -0,0 +1,81 @@ +package firehose + +import ( + "encoding/json" + + "github.com/goto/entropy/pkg/errors" +) + +type AutoscalerType string + +const ( + KEDA AutoscalerType = "keda" +) + +type AutoscalerSpec interface { + Validate() error + ReadConfig(cfg Config, driverConf driverConf) error + Pause(replica ...int) + Resume() + GetHelmValues(cfg Config) (map[string]any, error) +} + +type Autoscaler struct { + Enabled bool `json:"enabled"` + Type AutoscalerType `json:"type,omitempty"` + Spec AutoscalerSpec `json:"spec,omitempty"` +} + +func (autoscaler *Autoscaler) GetHelmValues(cfg Config) (map[string]any, error) { + values := map[string]any{ + "enabled": autoscaler.Enabled, + "type": autoscaler.Type, + } + + typeValues, err := autoscaler.Spec.GetHelmValues(cfg) + if err != nil { + return nil, err + } + values[string(autoscaler.Type)] = typeValues + + return values, nil +} + +func (autoscaler *Autoscaler) UnmarshalJSON(data []byte) error { + type BaseAutoscaler Autoscaler + autoscalerTemp := &struct { + Spec json.RawMessage `json:"spec"` + *BaseAutoscaler + }{ + BaseAutoscaler: (*BaseAutoscaler)(autoscaler), + } + + if err := json.Unmarshal(data, &autoscalerTemp); err != nil { + return errors.ErrInvalid.WithMsgf("invalid autoscaler config").WithCausef(err.Error()) + } + + switch autoscalerTemp.Type { + case KEDA: + var kedaSpec *Keda + if err := json.Unmarshal(autoscalerTemp.Spec, &kedaSpec); err != nil { + return errors.ErrInvalid.WithMsgf("invalid keda config").WithCausef(err.Error()) + } + autoscaler.Spec = kedaSpec + default: + return errors.ErrInvalid.WithMsgf("unsupported autoscaler type: %s", autoscaler.Type) + } + return nil +} + +func (autoscaler *Autoscaler) Validate() error { + if !autoscaler.Enabled { + return nil + } + if autoscaler.Spec == nil { + return errors.ErrInvalid.WithMsgf("autoscaler spec must be provided when autoscaler is enabled") + } + if err := autoscaler.Spec.Validate(); err != nil { + return err + } + return nil +} diff --git a/modules/firehose/autoscaler_test.go b/modules/firehose/autoscaler_test.go new file mode 100644 index 00000000..f2033901 --- /dev/null +++ b/modules/firehose/autoscaler_test.go @@ -0,0 +1,189 @@ +package firehose + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAutoscaler_UnmarshalJSON(t *testing.T) { + tests := []struct { + name string + json string + want *Autoscaler + wantErr bool + }{ + { + name: "valid keda config", + json: `{ + "enabled": true, + "type": "keda", + "spec": { + "min_replicas": 1, + "max_replicas": 10, + "triggers": { + "kafka-trigger": { + "type": "kafka", + "metadata": { + "lag_threshold": "100" + } + } + } + } + }`, + want: &Autoscaler{ + Enabled: true, + Type: KEDA, + Spec: &Keda{ + MinReplicas: 1, + MaxReplicas: 10, + Triggers: map[string]Trigger{ + "kafka-trigger": { + Type: KAFKA, + Metadata: map[string]string{ + "lag_threshold": "100", + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: 
"invalid autoscaler type", + json: `{ + "enabled": true, + "type": "unsupported", + "spec": {} + }`, + wantErr: true, + }, + { + name: "invalid json", + json: `{invalid}`, + wantErr: true, + }, + { + name: "invalid keda spec", + json: `{ + "enabled": true, + "type": "keda", + "spec": "invalid" + }`, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got Autoscaler + err := json.Unmarshal([]byte(tt.json), &got) + + if tt.wantErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.want.Enabled, got.Enabled) + assert.Equal(t, tt.want.Type, got.Type) + assert.Equal(t, tt.want.Spec, got.Spec) + }) + } +} + +func TestAutoscaler_Validate(t *testing.T) { + tests := []struct { + name string + autoscaler *Autoscaler + wantErr bool + }{ + { + name: "valid config", + autoscaler: &Autoscaler{ + Enabled: true, + Type: KEDA, + Spec: &Keda{ + MinReplicas: 1, + MaxReplicas: 10, + Triggers: map[string]Trigger{ + "kafka-trigger": { + Type: KAFKA, + Metadata: map[string]string{ + "lag_threshold": "100", + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "disabled autoscaler", + autoscaler: &Autoscaler{ + Enabled: false, + }, + wantErr: false, + }, + { + name: "enabled but no spec", + autoscaler: &Autoscaler{ + Enabled: true, + Type: KEDA, + }, + wantErr: true, + }, + { + name: "invalid config - min replicas greater than max", + autoscaler: &Autoscaler{ + Enabled: true, + Type: KEDA, + Spec: &Keda{ + MinReplicas: 10, + MaxReplicas: 1, + Triggers: map[string]Trigger{ + "kafka-trigger": { + Type: KAFKA, + Metadata: map[string]string{ + "lag_threshold": "100", + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "invalid config - zero max replicas", + autoscaler: &Autoscaler{ + Enabled: true, + Type: KEDA, + Spec: &Keda{ + MinReplicas: 1, + MaxReplicas: 0, + Triggers: map[string]Trigger{ + "kafka-trigger": { + Type: KAFKA, + Metadata: map[string]string{ + "lag_threshold": "100", + }, + }, + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.autoscaler.Validate() + if tt.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} diff --git a/modules/firehose/config.go b/modules/firehose/config.go index de380e98..bba3d086 100644 --- a/modules/firehose/config.go +++ b/modules/firehose/config.go @@ -6,98 +6,142 @@ import ( "fmt" "time" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/helm" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/validator" ) -const firehoseConsumerIDStartingSequence = "0001" +const ( + confSinkType = "SINK_TYPE" + confKeyConsumerID = "SOURCE_KAFKA_CONSUMER_GROUP_ID" + confKeyKafkaBrokers = "SOURCE_KAFKA_BROKERS" + confKeyKafkaTopic = "SOURCE_KAFKA_TOPIC" +) + +const helmReleaseNameMaxLength = 53 var ( //go:embed schema/config.json - completeConfigSchema string + configSchemaRaw []byte - //go:embed schema/scale.json - scaleActionSchema string - - //go:embed schema/reset.json - resetActionSchema string + validateConfig = validator.FromJSONSchema(configSchemaRaw) ) -type moduleConfig struct { - State string `json:"state"` - StopTime *time.Time `json:"stop_time"` - Telegraf map[string]interface{} `json:"telegraf"` - Firehose struct { - Replicas int 
`json:"replicas"` - KafkaBrokerAddress string `json:"kafka_broker_address"` - KafkaTopic string `json:"kafka_topic"` - KafkaConsumerID string `json:"kafka_consumer_id"` - EnvVariables map[string]string `json:"env_variables"` - } `json:"firehose"` +type ScaleParams struct { + Replicas int `json:"replicas"` +} + +type StartParams struct { + StopTime *time.Time `json:"stop_time"` +} + +type Config struct { + // Stopped flag when set forces the firehose to be stopped on next sync. + Stopped bool `json:"stopped"` + + // StopTime can be set to schedule the firehose to be stopped at given time. + StopTime *time.Time `json:"stop_time,omitempty"` + + // Replicas is the number of firehose instances to run. + Replicas int `json:"replicas"` + + // Namespace is the target namespace where firehose should be deployed. + // Inherits from driver config. + Namespace string `json:"namespace,omitempty"` + + // DeploymentID will be used as the release-name for the deployment. + // Must be shorter than 53 chars if set. If not set, one will be generated + // automatically. + DeploymentID string `json:"deployment_id,omitempty"` + + // EnvVariables contains all the firehose environment config values. + EnvVariables map[string]string `json:"env_variables,omitempty"` + + // ResetOffset represents the value to which kafka consumer offset was set to + ResetOffset string `json:"reset_offset,omitempty"` + + Limits UsageSpec `json:"limits,omitempty"` + Requests UsageSpec `json:"requests,omitempty"` + Telegraf *Telegraf `json:"telegraf,omitempty"` + ChartValues *ChartValues `json:"chart_values,omitempty"` + InitContainer InitContainer `json:"init_container,omitempty"` + Autoscaler *Autoscaler `json:"autoscaler,omitempty"` } -func (mc *moduleConfig) validateAndSanitize(r resource.Resource) error { - if mc.StopTime != nil && mc.StopTime.Before(time.Now()) { - return errors.ErrInvalid. - WithMsgf("value for stop_time must be greater than current time") +type Telegraf struct { + Enabled bool `json:"enabled,omitempty"` + Image map[string]any `json:"image,omitempty"` + Config TelegrafConf `json:"config,omitempty"` +} + +type TelegrafConf struct { + Output map[string]any `json:"output"` + AdditionalGlobalTags map[string]string `json:"additional_global_tags"` +} + +type ChartValues struct { + ImageRepository string `json:"image_repository" validate:"required"` + ImageTag string `json:"image_tag" validate:"required"` + ChartVersion string `json:"chart_version" validate:"required"` + ImagePullPolicy string `json:"image_pull_policy" validate:"required"` +} + +func readConfig(r resource.Resource, confJSON json.RawMessage, dc driverConf) (*Config, error) { + var cfg Config + if err := json.Unmarshal(confJSON, &cfg); err != nil { + return nil, errors.ErrInvalid.WithMsgf("invalid config json").WithCausef(err.Error()) + } + + cfg.EnvVariables = modules.CloneAndMergeMaps(dc.EnvVariables, cfg.EnvVariables) + cfg.InitContainer = dc.InitContainer + + if cfg.Replicas <= 0 { + cfg.Replicas = 1 } - if mc.Firehose.KafkaConsumerID == "" { - mc.Firehose.KafkaConsumerID = fmt.Sprintf("%s-%s", generateFirehoseName(r), firehoseConsumerIDStartingSequence) + if err := validateConfig(confJSON); err != nil { + return nil, err } - return nil -} + // note: enforce the kubernetes deployment name length limit. 
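+	// Helm release names are capped at 53 characters; modules.SafeName keeps short
+	// names intact (e.g. "abcd-efgh" becomes "abcd-efgh-firehose") and truncates
+	// longer ones with a short hash suffix, as the tests below illustrate.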
+ if len(cfg.DeploymentID) == 0 { + cfg.DeploymentID = modules.SafeName(fmt.Sprintf("%s-%s", r.Project, r.Name), "-firehose", helmReleaseNameMaxLength) + } else if len(cfg.DeploymentID) > helmReleaseNameMaxLength { + return nil, errors.ErrInvalid.WithMsgf("deployment_id must not have more than 53 chars") + } -func (mc *moduleConfig) GetHelmReleaseConfig(r resource.Resource) (*helm.ReleaseConfig, error) { - var output Output - err := json.Unmarshal(r.State.Output, &output) - if err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid output json: %v", err) + // we name a consumer group by adding a sequence suffix to the deployment name + // this sequence will later be incremented to name new consumer group while resetting offset + if consumerID := cfg.EnvVariables[confKeyConsumerID]; consumerID == "" { + cfg.EnvVariables[confKeyConsumerID] = fmt.Sprintf("%s-1", cfg.DeploymentID) } - defaults := output.Defaults - - rc := helm.DefaultReleaseConfig() - rc.Name = generateFirehoseName(r) - rc.Repository = defaults.ChartRepository - rc.Chart = defaults.ChartName - rc.Namespace = defaults.Namespace - rc.ForceUpdate = true - rc.Version = defaults.ChartVersion - - fc := mc.Firehose - fc.EnvVariables["SOURCE_KAFKA_BROKERS"] = fc.KafkaBrokerAddress - fc.EnvVariables["SOURCE_KAFKA_TOPIC"] = fc.KafkaTopic - fc.EnvVariables["SOURCE_KAFKA_CONSUMER_GROUP_ID"] = fc.KafkaConsumerID - - hv := map[string]interface{}{ - "replicaCount": mc.Firehose.Replicas, - "firehose": map[string]interface{}{ - "image": map[string]interface{}{ - "repository": defaults.ImageRepository, - "pullPolicy": defaults.ImagePullPolicy, - "tag": defaults.ImageTag, - }, - "config": fc.EnvVariables, - }, + + rl := dc.RequestsAndLimits[defaultKey] + if overrides, ok := dc.RequestsAndLimits[cfg.EnvVariables[confSinkType]]; ok { + rl.Limits = rl.Limits.merge(overrides.Limits) + rl.Requests = rl.Requests.merge(overrides.Requests) } - if len(mc.Telegraf) > 0 { - hv["telegraf"] = mc.Telegraf + cfg.Limits = rl.Limits.merge(cfg.Limits) + cfg.Requests = rl.Requests.merge(cfg.Requests) + + if cfg.Namespace == "" { + ns := dc.Namespace[defaultKey] + if override, ok := dc.Namespace[cfg.EnvVariables[confSinkType]]; ok { + ns = override + } + cfg.Namespace = ns } - rc.Values = hv - return rc, nil -} + if cfg.Autoscaler != nil && cfg.Autoscaler.Enabled { + if err := cfg.Autoscaler.Spec.ReadConfig(cfg, dc); err != nil { + return nil, err + } -func (mc *moduleConfig) JSON() []byte { - b, err := json.Marshal(mc) - if err != nil { - panic(err) + if err := cfg.Autoscaler.Validate(); err != nil { + return nil, err + } } - return b -} -func generateFirehoseName(r resource.Resource) string { - return fmt.Sprintf("%s-%s-firehose", r.Project, r.Name) + return &cfg, nil } diff --git a/modules/firehose/config_test.go b/modules/firehose/config_test.go new file mode 100644 index 00000000..e99f60d2 --- /dev/null +++ b/modules/firehose/config_test.go @@ -0,0 +1,48 @@ +package firehose + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goto/entropy/modules" +) + +func Test_safeReleaseName(t *testing.T) { + t.Parallel() + + table := []struct { + str string + want string + }{ + { + str: "abcd-efgh", + want: "abcd-efgh-firehose", + }, + { + str: "abcd-efgh-firehose", + want: "abcd-efgh-firehose", + }, + { + str: "ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghijklmnopqrstuvwxyz", + want: "ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghij-3801d0-firehose", + }, + { + str: 
"ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghi---klmnopqrstuvwxyz", + want: "ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghi-81c192-firehose", + }, + { + str: "ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghijklmnopqr-stuvwxyz1234567890", + want: "ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghij-bac696-firehose", + }, + } + + for i, tt := range table { + t.Run(fmt.Sprintf("Case#%d", i), func(t *testing.T) { + got := modules.SafeName(tt.str, "-firehose", helmReleaseNameMaxLength) + assert.Equal(t, tt.want, got) + assert.True(t, len(got) <= helmReleaseNameMaxLength, "release name has length %d", len(got)) + }) + } +} diff --git a/modules/firehose/data.go b/modules/firehose/data.go deleted file mode 100644 index cbcf2168..00000000 --- a/modules/firehose/data.go +++ /dev/null @@ -1,17 +0,0 @@ -package firehose - -import "encoding/json" - -type moduleData struct { - PendingSteps []string `json:"pending_steps"` - ResetTo string `json:"reset_to,omitempty"` - StateOverride string `json:"state_override,omitempty"` -} - -func (md moduleData) JSON() json.RawMessage { - bytes, err := json.Marshal(md) - if err != nil { - panic(err) - } - return bytes -} diff --git a/modules/firehose/driver.go b/modules/firehose/driver.go new file mode 100644 index 00000000..e62c5aa1 --- /dev/null +++ b/modules/firehose/driver.go @@ -0,0 +1,564 @@ +package firehose + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strings" + "text/template" + "time" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/helm" + "github.com/goto/entropy/pkg/kube" + "github.com/mitchellh/mapstructure" +) + +const ( + desiredStatusRunning = "RUNNING" + desiredStatusStopped = "STOPPED" +) + +const ( + kubeConfigModeAutoscaler = "AUTOSCALER" + resourceName = "firehose" + + firehoseAutoscalerTaintKey = "firehose_autoscaler" + firehoseNonAutoscalerTaintKey = "firehose_non_autoscaler" +) + +const ( + stepReleaseCreate = "release_create" + stepReleaseUpdate = "release_update" + stepReleaseStop = "release_stop" + stepKafkaReset = "consumer_reset" +) + +const ( + chartRepo = "https://goto.github.io/charts/" + chartName = "firehose" + imageRepo = "gotocompany/firehose" +) + +const ( + labelsConfKey = "labels" + + labelDeployment = "deployment" + labelOrchestrator = "orchestrator" + labelURN = "urn" + labelName = "name" + labelNamespace = "namespace" + + orchestratorLabelValue = "entropy" +) + +const defaultKey = "default" + +var defaultDriverConf = driverConf{ + Namespace: map[string]string{ + defaultKey: "firehose", + }, + ChartValues: ChartValues{ + ImageRepository: imageRepo, + ImageTag: "latest", + ChartVersion: "0.1.3", + ImagePullPolicy: "IfNotPresent", + }, + RequestsAndLimits: map[string]RequestsAndLimits{ + defaultKey: { + Limits: UsageSpec{ + CPU: "200m", + Memory: "512Mi", + }, + Requests: UsageSpec{ + CPU: "200m", + Memory: "512Mi", + }, + }, + }, +} + +type firehoseDriver struct { + timeNow func() time.Time + conf driverConf + kubeDeploy kubeDeployFn + kubeGetPod kubeGetPodFn + kubeGetDeployment kubeGetDeploymentFn + consumerReset consumerResetFn +} + +type ( + kubeDeployFn func(ctx context.Context, isCreate bool, conf kube.Config, hc helm.ReleaseConfig) error + kubeGetPodFn func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) + 
kubeGetDeploymentFn func(ctx context.Context, conf kube.Config, ns string, name string) (kube.Deployment, error) + consumerResetFn func(ctx context.Context, conf Config, out kubernetes.Output, resetTo string, offsetResetDelaySeconds int) error +) + +type driverConf struct { + // Labels to be injected to the chart during deployment. Values can be Go templates. + Labels map[string]string `json:"labels,omitempty"` + + // Telegraf is the telegraf configuration for the deployment. + Telegraf *Telegraf `json:"telegraf"` + + // Namespace is the kubernetes namespace where firehoses will be deployed. + Namespace map[string]string `json:"namespace" validate:"required"` + + // ChartValues is the chart and image version information. + ChartValues ChartValues `json:"chart_values" validate:"required"` + + // Tolerations represents the tolerations to be set for the deployment. + // The key in the map is the sink-type in upper case. + Tolerations map[string]kubernetes.Toleration `json:"tolerations"` + + EnvVariables map[string]string `json:"env_variables,omitempty"` + + // InitContainer can be set to have a container that is used as init_container on the + // deployment. + InitContainer InitContainer `json:"init_container"` + + // GCSSinkCredential can be set to the name of kubernetes secret containing GCS credential. + // The secret must already exist on the target kube cluster in the same namespace. + // The secret will be mounted as a volume and the appropriate credential path will be set. + GCSSinkCredential string `json:"gcs_sink_credential,omitempty"` + + // DLQGCSSinkCredential is same as GCSSinkCredential but for DLQ. + DLQGCSSinkCredential string `json:"dlq_gcs_sink_credential,omitempty"` + + // BigQuerySinkCredential is same as GCSSinkCredential but for BigQuery credential. + BigQuerySinkCredential string `json:"big_query_sink_credential,omitempty"` + + // RequestsAndLimits can be set to configure the container cpu/memory requests & limits. + // 'default' key will be used as base and any sink-type will be used as the override. + RequestsAndLimits map[string]RequestsAndLimits `json:"requests_and_limits" validate:"required"` + + // NodeAffinityMatchExpressions can be used to set node-affinity for the deployment. 
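+	// When set, these take precedence over the affinity rules coming from the
+	// kubernetes dependency output.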
+ NodeAffinityMatchExpressions kubernetes.NodeAffinityMatchExpressions `json:"node_affinity_match_expressions"` + + // delay between stopping a firehose and making an offset reset request + OffsetResetDelaySeconds int `json:"offset_reset_delay_seconds"` + + // timeout value for a kube deployment run + KubeDeployTimeout int `json:"kube_deploy_timeout_seconds"` + + Autoscaler FirehoseAutoscaler `json:"autoscaler,omitempty"` +} + +type FirehoseAutoscaler struct { + Keda map[string]Keda `json:"keda,omitempty"` +} + +type Triggers map[string]Trigger + +type RequestsAndLimits struct { + Limits UsageSpec `json:"limits,omitempty"` + Requests UsageSpec `json:"requests,omitempty"` +} + +type InitContainer struct { + Enabled bool `json:"enabled"` + + Args []string `json:"args"` + Command []string `json:"command"` + + Repository string `json:"repository"` + ImageTag string `json:"image_tag"` + PullPolicy string `json:"pull_policy"` +} + +type UsageSpec struct { + CPU string `json:"cpu,omitempty" validate:"required"` + Memory string `json:"memory,omitempty" validate:"required"` +} + +type Output struct { + Pods []kube.Pod `json:"pods,omitempty"` + Namespace string `json:"namespace,omitempty"` + ReleaseName string `json:"release_name,omitempty"` + Deployment *kube.Deployment `json:"deployment,omitempty"` + DesiredStatus string `json:"desired_status,omitempty"` + AutoscalerEnabled bool `json:"autoscaler_enabled,omitempty"` +} + +type transientData struct { + PendingSteps []string `json:"pending_steps"` + ResetOffsetTo string `json:"reset_offset_to,omitempty"` +} + +func (fd *firehoseDriver) getHelmRelease(res resource.Resource, conf Config, + kubeOut kubernetes.Output, +) (*helm.ReleaseConfig, error) { + var telegrafConf Telegraf + + entropyLabels := map[string]string{ + labelDeployment: conf.DeploymentID, + labelOrchestrator: orchestratorLabelValue, + } + + otherLabels := map[string]string{ + labelURN: res.URN, + labelName: res.Name, + labelNamespace: conf.Namespace, + } + + deploymentLabels, err := renderTpl(fd.conf.Labels, modules.CloneAndMergeMaps(res.Labels, modules.CloneAndMergeMaps(entropyLabels, otherLabels))) + if err != nil { + return nil, err + } + + if conf.Telegraf != nil && conf.Telegraf.Enabled { + mergedLabelsAndEnvVariablesMap := modules.CloneAndMergeMaps(modules.CloneAndMergeMaps(conf.EnvVariables, modules.CloneAndMergeMaps(deploymentLabels, modules.CloneAndMergeMaps(res.Labels, entropyLabels))), otherLabels) + + conf.EnvVariables, err = renderTpl(conf.EnvVariables, mergedLabelsAndEnvVariablesMap) + if err != nil { + return nil, err + } + + telegrafTags, err := renderTpl(conf.Telegraf.Config.AdditionalGlobalTags, mergedLabelsAndEnvVariablesMap) + if err != nil { + return nil, err + } + + for key, val := range conf.Telegraf.Config.Output { + valAsMap, ok := val.(map[string]interface{}) + if !ok { + continue + } + + valAsMap, err = renderTplOfMapStringAny(valAsMap, mergedLabelsAndEnvVariablesMap) + if err != nil { + return nil, err + } + + conf.Telegraf.Config.Output[key] = valAsMap + } + + telegrafConf = Telegraf{ + Enabled: true, + Image: conf.Telegraf.Image, + Config: TelegrafConf{ + Output: conf.Telegraf.Config.Output, + AdditionalGlobalTags: telegrafTags, + }, + } + } + + var tolerationKey = "" + tolerations := []map[string]any{} + tolerationMode := kubeOut.TolerationMode[resourceName] + if tolerationMode == kubeConfigModeAutoscaler { + if conf.Autoscaler == nil || !conf.Autoscaler.Enabled { + tolerationKey = firehoseNonAutoscalerTaintKey + } else { + tolerationKey = 
firehoseAutoscalerTaintKey + } + } else { + // undefined or sink_type + tolerationKey = fmt.Sprintf("firehose_%s", conf.EnvVariables["SINK_TYPE"]) + } + + for _, t := range kubeOut.Tolerations[tolerationKey] { + tolerations = append(tolerations, map[string]any{ + "key": t.Key, + "value": t.Value, + "effect": t.Effect, + "operator": t.Operator, + }) + } + + mountSecrets := []map[string]any{} + + requiredDuringSchedulingIgnoredDuringExecution := []kubernetes.Preference{} + preferredDuringSchedulingIgnoredDuringExecution := []kubernetes.WeightedPreference{} + + var affinityKey = "" + affinityMode := kubeOut.AffinityMode[resourceName] + if affinityMode == kubeConfigModeAutoscaler { + if conf.Autoscaler == nil || !conf.Autoscaler.Enabled { + affinityKey = firehoseNonAutoscalerTaintKey + } else { + affinityKey = firehoseAutoscalerTaintKey + } + } else { + affinityKey = fmt.Sprintf("firehose_%s", conf.EnvVariables["SINK_TYPE"]) + } + + if affinity, ok := kubeOut.Affinities[affinityKey]; ok { + requiredDuringSchedulingIgnoredDuringExecution = affinity.RequiredDuringSchedulingIgnoredDuringExecution + preferredDuringSchedulingIgnoredDuringExecution = affinity.PreferredDuringSchedulingIgnoredDuringExecution + } + + if fd.conf.NodeAffinityMatchExpressions.RequiredDuringSchedulingIgnoredDuringExecution != nil { + requiredDuringSchedulingIgnoredDuringExecution = fd.conf.NodeAffinityMatchExpressions.RequiredDuringSchedulingIgnoredDuringExecution + } + if fd.conf.NodeAffinityMatchExpressions.PreferredDuringSchedulingIgnoredDuringExecution != nil { + preferredDuringSchedulingIgnoredDuringExecution = fd.conf.NodeAffinityMatchExpressions.PreferredDuringSchedulingIgnoredDuringExecution + } + + if fd.conf.GCSSinkCredential != "" { + const mountFile = "gcs_auth.json" + credPath := fmt.Sprintf("/etc/secret/%s", mountFile) + + mountSecrets = append(mountSecrets, map[string]any{ + "value": fd.conf.GCSSinkCredential, + "key": "gcs_credential", + "path": mountFile, + }) + conf.EnvVariables["SINK_BLOB_GCS_CREDENTIAL_PATH"] = credPath + conf.EnvVariables["SINK_BIGTABLE_CREDENTIAL_PATH"] = credPath + } + + if fd.conf.DLQGCSSinkCredential != "" { + const mountFile = "dlq_gcs_auth.json" + credPath := fmt.Sprintf("/etc/secret/%s", mountFile) + + mountSecrets = append(mountSecrets, map[string]any{ + "value": fd.conf.DLQGCSSinkCredential, + "key": "dlq_gcs_credential", + "path": mountFile, + }) + conf.EnvVariables["DLQ_GCS_CREDENTIAL_PATH"] = credPath + } + + if fd.conf.BigQuerySinkCredential != "" { + const mountFile = "bigquery_auth.json" + credPath := fmt.Sprintf("/etc/secret/%s", mountFile) + + mountSecrets = append(mountSecrets, map[string]any{ + "value": fd.conf.BigQuerySinkCredential, + "key": "bigquery_credential", + "path": mountFile, + }) + conf.EnvVariables["SINK_BIGQUERY_CREDENTIAL_PATH"] = credPath + } + + rc := helm.DefaultReleaseConfig() + rc.Timeout = fd.conf.KubeDeployTimeout + rc.Name = conf.DeploymentID + rc.Repository = chartRepo + rc.Chart = chartName + rc.Namespace = conf.Namespace + rc.ForceUpdate = true + rc.Version = conf.ChartValues.ChartVersion + + imageRepository := fd.conf.ChartValues.ImageRepository + if conf.ChartValues.ImageRepository != "" { + imageRepository = conf.ChartValues.ImageRepository + } + + requiredDuringSchedulingIgnoredDuringExecutionInterface := preferenceSliceToInterfaceSlice(requiredDuringSchedulingIgnoredDuringExecution) + preferredDuringSchedulingIgnoredDuringExecutionInterface := weightedPreferencesToInterfaceSlice(preferredDuringSchedulingIgnoredDuringExecution) + + 
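+ // Assemble the chart values. The nesting below mirrors the firehose chart's values layout;
+ // a rough sketch of what gets rendered (values shown are only illustrative; actual values
+ // come from conf and fd.conf):
+ //
+ //   <labelsConfKey>: deployment labels merged with entropy labels
+ //   replicaCount: 2
+ //   firehose:
+ //     image: {repository: gotocompany/firehose, tag: latest, pullPolicy: IfNotPresent}
+ //     config: {SINK_TYPE: LOG, ...}
+ //     resources: {limits: {cpu, memory}, requests: {cpu, memory}}
+ //   tolerations: [{key, value, effect, operator}, ...]
+ //   nodeAffinityMatchExpressions: {requiredDuringScheduling..., preferredDuringScheduling...}
+ //   init-firehose: {enabled, image, command, args}
+ //   telegraf: {enabled, image, config: {output, additional_global_tags}}
+ //   mountSecrets: [{key, value, path}, ...]
+ //   autoscaler: {...}   (only when conf.Autoscaler is set)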
rc.Values = map[string]any{ + labelsConfKey: modules.CloneAndMergeMaps(deploymentLabels, entropyLabels), + "replicaCount": conf.Replicas, + "firehose": map[string]any{ + "image": map[string]any{ + "repository": imageRepository, + "pullPolicy": conf.ChartValues.ImagePullPolicy, + "tag": conf.ChartValues.ImageTag, + }, + "config": conf.EnvVariables, + "resources": map[string]any{ + "limits": map[string]any{ + "cpu": conf.Limits.CPU, + "memory": conf.Limits.Memory, + }, + "requests": map[string]any{ + "cpu": conf.Requests.CPU, + "memory": conf.Requests.Memory, + }, + }, + }, + "tolerations": tolerations, + "nodeAffinityMatchExpressions": map[string]any{ + "requiredDuringSchedulingIgnoredDuringExecution": requiredDuringSchedulingIgnoredDuringExecutionInterface, + "preferredDuringSchedulingIgnoredDuringExecution": preferredDuringSchedulingIgnoredDuringExecutionInterface, + }, + "init-firehose": map[string]any{ + "enabled": fd.conf.InitContainer.Enabled, + "image": map[string]any{ + "repository": fd.conf.InitContainer.Repository, + "pullPolicy": fd.conf.InitContainer.PullPolicy, + "tag": fd.conf.InitContainer.ImageTag, + }, + "command": fd.conf.InitContainer.Command, + "args": fd.conf.InitContainer.Args, + }, + "telegraf": map[string]any{ + "enabled": telegrafConf.Enabled, + "image": telegrafConf.Image, + "config": map[string]any{ + "output": telegrafConf.Config.Output, + "additional_global_tags": telegrafConf.Config.AdditionalGlobalTags, + }, + }, + "mountSecrets": mountSecrets, + } + + if conf.Autoscaler != nil { + rc.Values["autoscaler"], err = conf.Autoscaler.GetHelmValues(conf) + if err != nil { + return nil, err + } + } + + return rc, nil +} + +func renderTpl(labelsTpl map[string]string, labelsValues map[string]string) (map[string]string, error) { + const useZeroValueForMissingKey = "missingkey=zero" + + finalLabels := map[string]string{} + for k, v := range labelsTpl { + var buf bytes.Buffer + t, err := template.New("").Option(useZeroValueForMissingKey).Parse(v) + if err != nil { + return nil, errors.ErrInvalid. + WithMsgf("label template for '%s' is invalid", k).WithCausef(err.Error()) + } else if err := t.Execute(&buf, labelsValues); err != nil { + return nil, errors.ErrInvalid. + WithMsgf("failed to render label template").WithCausef(err.Error()) + } + + // allow empty values + // labelVal := strings.TrimSpace(buf.String()) + // if labelVal == "" { + // continue + // } + + finalLabels[k] = buf.String() + } + return finalLabels, nil +} + +func mergeChartValues(cur, newVal *ChartValues) (*ChartValues, error) { + if newVal == nil { + return cur, nil + } + + merged := ChartValues{ + ImageRepository: cur.ImageRepository, + ImageTag: cur.ImageTag, + ChartVersion: cur.ChartVersion, + ImagePullPolicy: cur.ImagePullPolicy, + } + + newTag := strings.TrimSpace(newVal.ImageTag) + if newTag != "" { + if strings.Contains(newTag, ":") && !strings.HasPrefix(newTag, imageRepo) { + return nil, errors.ErrInvalid. 
+ WithMsgf("unknown image repo: '%s', must start with '%s'", newTag, imageRepo) + } + merged.ImageTag = strings.TrimPrefix(newTag, imageRepo+":") + } + + return &merged, nil +} + +func readOutputData(exr module.ExpandedResource) (*Output, error) { + var curOut Output + if len(exr.Resource.State.Output) == 0 { + return &curOut, nil + } + if err := json.Unmarshal(exr.Resource.State.Output, &curOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("corrupted output").WithCausef(err.Error()) + } + return &curOut, nil +} + +func readTransientData(exr module.ExpandedResource) (*transientData, error) { + if len(exr.Resource.State.ModuleData) == 0 { + return &transientData{}, nil + } + + var modData transientData + if err := json.Unmarshal(exr.Resource.State.ModuleData, &modData); err != nil { + return nil, errors.ErrInternal.WithMsgf("corrupted transient data").WithCausef(err.Error()) + } + return &modData, nil +} + +func (us UsageSpec) merge(overide UsageSpec) UsageSpec { + clone := us + + if overide.CPU != "" { + clone.CPU = overide.CPU + } + + if overide.Memory != "" { + clone.Memory = overide.Memory + } + + return clone +} + +func renderTplOfMapStringAny(labelsTpl map[string]any, labelsValues map[string]string) (map[string]any, error) { + outputMap := make(map[string]string) + + for key, value := range labelsTpl { + if strValue, ok := value.(string); ok { + outputMap[key] = strValue + } + } + + outputMap, err := renderTpl(outputMap, labelsValues) + if err != nil { + return nil, err + } + + for key, val := range outputMap { + labelsTpl[key] = val + } + + return labelsTpl, nil +} + +func preferenceSliceToInterfaceSlice(prefs []kubernetes.Preference) []map[string]interface{} { + result := make([]map[string]interface{}, len(prefs)) + + for i, pref := range prefs { + var prefMap map[string]interface{} + if err := mapstructure.Decode(pref, &prefMap); err != nil { + continue + } + + lowercaseMap := make(map[string]interface{}) + for k, v := range prefMap { + lowercaseMap[strings.ToLower(k)] = v + } + result[i] = lowercaseMap + } + + return result +} + +func weightedPreferencesToInterfaceSlice(weightedPrefs []kubernetes.WeightedPreference) []map[string]interface{} { + result := make([]map[string]interface{}, len(weightedPrefs)) + + for i, wp := range weightedPrefs { + var wpMap map[string]interface{} + if err := mapstructure.Decode(wp, &wpMap); err != nil { + continue + } + + lowercaseMap := make(map[string]interface{}) + for k, v := range wpMap { + // Special handling for the preference field + if k == "Preference" && v != nil { + // Convert the nested Preference slice + lowercaseMap["preference"] = preferenceSliceToInterfaceSlice(wp.Preference) + } else { + lowercaseMap[strings.ToLower(k)] = v + } + } + result[i] = lowercaseMap + } + + return result +} diff --git a/modules/firehose/driver_log.go b/modules/firehose/driver_log.go new file mode 100644 index 00000000..da45c3e8 --- /dev/null +++ b/modules/firehose/driver_log.go @@ -0,0 +1,55 @@ +package firehose + +import ( + "context" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" +) + +func (fd *firehoseDriver) Log(ctx context.Context, res module.ExpandedResource, filter map[string]string) (<-chan module.LogChunk, error) { + conf, err := readConfig(res.Resource, res.Spec.Configs, fd.conf) + if err != nil { + return nil, 
errors.ErrInternal.WithCausef(err.Error()) + } + + if filter == nil { + filter = map[string]string{} + } + filter["app"] = conf.DeploymentID + + var kubeOut kubernetes.Output + if err := json.Unmarshal(res.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + kubeCl, err := kube.NewClient(ctx, kubeOut.Configs) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("failed to create new kube client on firehose driver Log").WithCausef(err.Error()) + } + + logs, err := kubeCl.StreamLogs(ctx, conf.Namespace, filter) + if err != nil { + return nil, err + } + + mappedLogs := make(chan module.LogChunk) + go func() { + defer close(mappedLogs) + for { + select { + case log, ok := <-logs: + if !ok { + return + } + mappedLogs <- module.LogChunk{Data: log.Data, Labels: log.Labels} + case <-ctx.Done(): + return + } + } + }() + + return mappedLogs, err +} diff --git a/modules/firehose/driver_output.go b/modules/firehose/driver_output.go new file mode 100644 index 00000000..637d9cd0 --- /dev/null +++ b/modules/firehose/driver_output.go @@ -0,0 +1,64 @@ +package firehose + +import ( + "context" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" +) + +func (fd *firehoseDriver) Output(ctx context.Context, exr module.ExpandedResource) (json.RawMessage, error) { + output, err := readOutputData(exr) + if err != nil { + return nil, err + } + + conf, err := readConfig(exr.Resource, exr.Spec.Configs, fd.conf) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + var kubeOut kubernetes.Output + if err := json.Unmarshal(exr.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid kube state").WithCausef(err.Error()) + } + + return fd.refreshOutput(ctx, exr.Resource, *conf, *output, kubeOut) +} + +func (fd *firehoseDriver) refreshOutput(ctx context.Context, r resource.Resource, + conf Config, output Output, kubeOut kubernetes.Output, +) (json.RawMessage, error) { + rc, err := fd.getHelmRelease(r, conf, kubeOut) + if err != nil { + return nil, err + } + + pods, err := fd.kubeGetPod(ctx, kubeOut.Configs, rc.Namespace, map[string]string{"app": rc.Name}) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + output.Pods = pods + output.Namespace = conf.Namespace + + deployment, err := fd.kubeGetDeployment(ctx, kubeOut.Configs, rc.Namespace, conf.DeploymentID) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + output.Deployment = &deployment + output.DesiredStatus = fd.desiredStatus(conf) + output.AutoscalerEnabled = conf.Autoscaler != nil && conf.Autoscaler.Enabled + + return modules.MustJSON(output), nil +} + +func (fd *firehoseDriver) desiredStatus(conf Config) string { + if conf.Stopped { + return desiredStatusStopped + } + return desiredStatusRunning +} diff --git a/modules/firehose/driver_output_test.go b/modules/firehose/driver_output_test.go new file mode 100644 index 00000000..7d3eee5b --- /dev/null +++ b/modules/firehose/driver_output_test.go @@ -0,0 +1,330 @@ +package firehose + +import ( + "context" + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + 
"github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" +) + +func TestFirehoseDriver_Output(t *testing.T) { + t.Parallel() + + table := []struct { + title string + kubeGetPod func(t *testing.T) kubeGetPodFn + kubeGetDeployment func(t *testing.T) kubeGetDeploymentFn + exr module.ExpandedResource + want json.RawMessage + wantErr error + }{ + { + title: "InvalidModuleData", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + State: resource.State{}, + }, + }, + wantErr: errors.ErrInternal, + }, + { + title: "InvalidOutput", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + State: resource.State{ + ModuleData: modules.MustJSON(transientData{}), + }, + }, + }, + wantErr: errors.ErrInternal, + }, + { + title: "InvalidConfig", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + State: resource.State{ + Output: modules.MustJSON(Output{}), + ModuleData: modules.MustJSON(transientData{}), + }, + }, + }, + wantErr: errors.ErrInternal, + }, + { + title: "GetPod_Failure", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{}), + }, "LOG", "firehose"), + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + assert.Equal(t, ns, "firehose") + assert.Equal(t, labels["app"], "firehose-foo-fh1") + return nil, errors.New("failed") + } + }, + wantErr: errors.ErrInternal, + }, + { + title: "GetPod_Success", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Pods: nil, + Namespace: "firehose", + ReleaseName: "foo-bar", + DesiredStatus: "RUNNING", + }), + }, "LOG", "firehose"), + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + assert.Equal(t, ns, "firehose") + assert.Equal(t, labels["app"], "firehose-foo-fh1") + return []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, nil + } + }, + kubeGetDeployment: func(t *testing.T) kubeGetDeploymentFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, name string) (kube.Deployment, error) { + assert.Equal(t, ns, "firehose") + return kube.Deployment{ + Name: "foo-bar", + Paused: false, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 2, + Conditions: []map[string]string{}, + }, nil + } + }, + want: modules.MustJSON(Output{ + Pods: []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, + Deployment: &kube.Deployment{ + Name: "foo-bar", + Paused: false, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 2, + Conditions: []map[string]string{}, + }, + Namespace: "firehose", + ReleaseName: "foo-bar", + DesiredStatus: "RUNNING", + }), + }, + { + title: "GetDeployment_Failure", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + 
Pods: nil, + Namespace: "firehose", + ReleaseName: "foo-bar", + DesiredStatus: "RUNNING", + }), + }, "LOG", "firehose"), + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + assert.Equal(t, ns, "firehose") + assert.Equal(t, labels["app"], "firehose-foo-fh1") + return []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, nil + } + }, + kubeGetDeployment: func(t *testing.T) kubeGetDeploymentFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, name string) (kube.Deployment, error) { + assert.Equal(t, ns, "firehose") + return kube.Deployment{}, errors.New("failed") + } + }, + wantErr: errors.ErrInternal, + }, + { + title: "GetDeployment_Success", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Pods: nil, + Namespace: "firehose", + ReleaseName: "foo-bar", + DesiredStatus: "RUNNING", + }), + }, "LOG", "firehose"), + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + assert.Equal(t, ns, "firehose") + assert.Equal(t, labels["app"], "firehose-foo-fh1") + return []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, nil + } + }, + kubeGetDeployment: func(t *testing.T) kubeGetDeploymentFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, name string) (kube.Deployment, error) { + assert.Equal(t, ns, "firehose") + return kube.Deployment{ + Name: "foo-bar", + Paused: false, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 2, + Conditions: []map[string]string{}, + }, nil + } + }, + want: modules.MustJSON(Output{ + Pods: []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, + Deployment: &kube.Deployment{ + Name: "foo-bar", + Paused: false, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 2, + Conditions: []map[string]string{}, + }, + Namespace: "firehose", + ReleaseName: "foo-bar", + DesiredStatus: "RUNNING", + }), + }, + { + title: "Update_Namespace", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Pods: nil, + Namespace: "firehose", + ReleaseName: "foo-bar", + DesiredStatus: "RUNNING", + }), + }, "BIGQUERY", "bigquery-firehose"), + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + assert.Equal(t, ns, "bigquery-firehose") + assert.Equal(t, labels["app"], "firehose-foo-fh1") + return []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, nil + } + }, + kubeGetDeployment: func(t *testing.T) kubeGetDeploymentFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, name string) (kube.Deployment, error) { + assert.Equal(t, ns, "bigquery-firehose") + return kube.Deployment{ + Name: "foo-bar", + Paused: false, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 2, + Conditions: []map[string]string{}, + }, nil + } + }, + want: modules.MustJSON(Output{ + Pods: []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, + Deployment: &kube.Deployment{ + Name: "foo-bar", + Paused: false, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 2, + 
Conditions: []map[string]string{}, + }, + Namespace: "bigquery-firehose", + ReleaseName: "foo-bar", + DesiredStatus: "RUNNING", + }), + }, + } + + for _, tt := range table { + t.Run(tt.title, func(t *testing.T) { + fd := &firehoseDriver{ + conf: defaultDriverConf, + timeNow: func() time.Time { return frozenTime }, + } + + fd.conf.Namespace = map[string]string{ + defaultKey: "firehose", + "BIGQUERY": "bigquery-firehose", + } + + if tt.kubeGetPod != nil { + fd.kubeGetPod = tt.kubeGetPod(t) + } + + if tt.kubeGetDeployment != nil { + fd.kubeGetDeployment = tt.kubeGetDeployment(t) + } + + got, err := fd.Output(context.Background(), tt.exr) + if tt.wantErr != nil { + require.Error(t, err) + assert.True(t, errors.Is(err, tt.wantErr), "wantErr=%v\ngotErr=%v", tt.wantErr, err) + } else { + assert.NoError(t, err) + require.NotNil(t, got) + assert.JSONEq(t, string(tt.want), string(got)) + } + }) + } +} diff --git a/modules/firehose/driver_plan.go b/modules/firehose/driver_plan.go new file mode 100644 index 00000000..4f0a83db --- /dev/null +++ b/modules/firehose/driver_plan.go @@ -0,0 +1,302 @@ +package firehose + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + "strconv" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kafka" +) + +const SourceKafkaConsumerAutoOffsetReset = "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_OFFSET_RESET" + +var suffixRegex = regexp.MustCompile(`^([A-Za-z0-9-]+)-([0-9]+)$`) + +var errCauseInvalidNamespaceUpdate = "cannot update kube namespace of a running firehose" + +func (fd *firehoseDriver) Plan(_ context.Context, exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + switch act.Name { + case module.CreateAction: + return fd.planCreate(exr, act) + + case ResetAction: + return fd.planReset(exr, act) + + case ResetV2Action: + return fd.planResetV2(exr, act) + + default: + return fd.planChange(exr, act) + } +} + +func (fd *firehoseDriver) planChange(exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + curConf, err := readConfig(exr.Resource, exr.Resource.Spec.Configs, fd.conf) + if err != nil { + return nil, err + } + + switch act.Name { + case module.UpdateAction: + newConf, err := readConfig(exr.Resource, act.Params, fd.conf) + if err != nil { + return nil, err + } + + chartVals, err := mergeChartValues(curConf.ChartValues, newConf.ChartValues) + if err != nil { + return nil, err + } + + // restore configs that are not user-controlled. 
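+ // DeploymentID stays fixed after create, chart values keep the current release's repository,
+ // chart version and pull policy (only the image tag can be overridden, see mergeChartValues),
+ // and telegraf/init-container are always taken from the module-level driver config.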
+ newConf.DeploymentID = curConf.DeploymentID + newConf.ChartValues = chartVals + newConf.Telegraf = fd.conf.Telegraf + newConf.InitContainer = fd.conf.InitContainer + + ns := fd.conf.Namespace[defaultKey] + if override, ok := fd.conf.Namespace[newConf.EnvVariables[confSinkType]]; ok { + ns = override + } + + // override namespace during update + var kubeOut kubernetes.Output + if err := json.Unmarshal(exr.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid kube state").WithCausef(err.Error()) + } + + if kubeOut.Configs.Namespace != "" { + ns = kubeOut.Configs.Namespace + } + + newConf.Namespace = ns + + if curConf.Namespace != newConf.Namespace { + if !curConf.Stopped { + return nil, errors.ErrInvalid.WithCausef(errCauseInvalidNamespaceUpdate) + } + } + + curConf = newConf + + case ScaleAction: + var scaleParams ScaleParams + if err := json.Unmarshal(act.Params, &scaleParams); err != nil { + return nil, errors.ErrInvalid.WithMsgf("invalid params for scale action").WithCausef(err.Error()) + } else if scaleParams.Replicas < 1 { + return nil, errors.ErrInvalid.WithMsgf("replicas must be >= 1") + } + + curConf.Replicas = scaleParams.Replicas + + case StartAction: + var startParams StartParams + if err := json.Unmarshal(act.Params, &startParams); err != nil { + return nil, errors.ErrInvalid.WithMsgf("invalid params for start action").WithCausef(err.Error()) + } + curConf.Stopped = false + if startParams.StopTime != nil { + curConf.StopTime = startParams.StopTime + } + + if curConf.Autoscaler != nil { + curConf.Autoscaler.Spec.Resume() + } + + case StopAction: + curConf.Stopped = true + + if curConf.Autoscaler != nil { + curConf.Autoscaler.Spec.Pause(0) + } + + case UpgradeAction: + // upgrade the chart values to the latest project-level config. + // Note: upgrade/downgrade will happen based on module-level configs. + curConf.ChartValues = &fd.conf.ChartValues + } + + immediately := fd.timeNow() + + exr.Resource.Spec.Configs = modules.MustJSON(curConf) + + err = fd.validateHelmReleaseConfigs(exr, *curConf) + if err != nil { + return nil, err + } + + exr.Resource.State = resource.State{ + Status: resource.StatusPending, + Output: exr.Resource.State.Output, + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseUpdate}, + }), + NextSyncAt: &immediately, + } + + return &exr.Resource, nil +} + +func (fd *firehoseDriver) planCreate(exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + conf, err := readConfig(exr.Resource, act.Params, fd.conf) + if err != nil { + return nil, err + } + + chartVals, err := mergeChartValues(&fd.conf.ChartValues, conf.ChartValues) + if err != nil { + return nil, err + } + + // set project defaults. 
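+ // Telegraf always comes from the module-level driver config, and the chart values are the
+ // module defaults with only the image tag optionally overridden by the request
+ // (see mergeChartValues).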
+ conf.Telegraf = fd.conf.Telegraf + conf.ChartValues = chartVals + + // override namespace during creation + var kubeOut kubernetes.Output + if err := json.Unmarshal(exr.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid kube state").WithCausef(err.Error()) + } + + if kubeOut.Configs.Namespace != "" { + conf.Namespace = kubeOut.Configs.Namespace + } + + immediately := fd.timeNow() + + exr.Resource.Spec.Configs = modules.MustJSON(conf) + + err = fd.validateHelmReleaseConfigs(exr, *conf) + if err != nil { + return nil, err + } + + exr.Resource.State = resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: conf.Namespace, + ReleaseName: conf.DeploymentID, + }), + NextSyncAt: &immediately, + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseCreate}, + }), + } + + return &exr.Resource, nil +} + +func (fd *firehoseDriver) planResetV2(exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + resetValue, err := kafka.ParseResetV2Params(act.Params) + if err != nil { + return nil, err + } + + immediately := fd.timeNow() + + curConf, err := readConfig(exr.Resource, exr.Resource.Spec.Configs, fd.conf) + if err != nil { + return nil, err + } + + curConf.ResetOffset = resetValue + + exr.Resource.Spec.Configs = modules.MustJSON(curConf) + exr.Resource.State = resource.State{ + Status: resource.StatusPending, + Output: exr.Resource.State.Output, + NextSyncAt: &immediately, + ModuleData: modules.MustJSON(transientData{ + ResetOffsetTo: resetValue, + PendingSteps: []string{ + stepReleaseStop, // stop the firehose + stepKafkaReset, // reset the consumer group offset value. + stepReleaseUpdate, // restart the deployment. + }, + }), + } + return &exr.Resource, nil +} + +func (fd *firehoseDriver) planReset(exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + resetValue, err := kafka.ParseResetParams(act.Params) + if err != nil { + return nil, err + } + + immediately := fd.timeNow() + + curConf, err := readConfig(exr.Resource, exr.Resource.Spec.Configs, fd.conf) + if err != nil { + return nil, err + } + + curConf.ResetOffset = resetValue + curConf.EnvVariables[SourceKafkaConsumerAutoOffsetReset] = resetValue + curConf.EnvVariables[confKeyConsumerID], err = getNewConsumerGroupID(curConf.EnvVariables[confKeyConsumerID]) + if err != nil { + return nil, err + } + + // if keda autoscaler enabled, update scaler metadata value + if curConf.Autoscaler != nil && curConf.Autoscaler.Type == KEDA { + kedaSpec, ok := curConf.Autoscaler.Spec.(*Keda) + if !ok { + return nil, err + } + + err = kedaSpec.updateTriggersMetadata(curConf.EnvVariables) + if err != nil { + return nil, err + } + curConf.Autoscaler.Spec = kedaSpec + } + + exr.Resource.Spec.Configs = modules.MustJSON(curConf) + exr.Resource.State = resource.State{ + Status: resource.StatusPending, + Output: exr.Resource.State.Output, + NextSyncAt: &immediately, + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{ + stepReleaseStop, // stop the firehose + stepReleaseUpdate, // restart the deployment. 
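+ // Note: unlike planResetV2 there is no explicit kafka reset step here; the offset is
+ // reset by switching to a fresh consumer group ID and setting
+ // SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_OFFSET_RESET on the firehose itself.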
+ }, + }), + } + return &exr.Resource, nil +} + +func getNewConsumerGroupID(curGroup string) (string, error) { + matches := suffixRegex.FindStringSubmatch(curGroup) + if expLen := 3; len(matches) != expLen { + return fmt.Sprintf("%s-1", curGroup), nil + } + prefix, sequence := matches[1], matches[2] + + seq, err := strconv.Atoi(sequence) + if err != nil { + return "", errors.Errorf("error converting group sequence %s to int: %v", sequence, err) + } else { + seq++ + } + + return fmt.Sprintf("%s-%d", prefix, seq), nil +} + +func (fd *firehoseDriver) validateHelmReleaseConfigs(expandedResource module.ExpandedResource, config Config) error { + var kubeOut kubernetes.Output + if err := json.Unmarshal(expandedResource.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { + return errors.ErrInternal.WithMsgf("invalid kube state").WithCausef(err.Error()) + } + + _, err := fd.getHelmRelease(expandedResource.Resource, config, kubeOut) + return err +} diff --git a/modules/firehose/driver_plan_create_test.go b/modules/firehose/driver_plan_create_test.go new file mode 100644 index 00000000..0521c980 --- /dev/null +++ b/modules/firehose/driver_plan_create_test.go @@ -0,0 +1,396 @@ +package firehose + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" +) + +func TestFirehoseDriver_Plan_create(t *testing.T) { + t.Parallel() + + table := []struct { + title string + exr module.ExpandedResource + act module.ActionRequest + want *resource.Resource + wantErr error + }{ + // create action tests + { + title: "Create_InvalidParamsJSON", + exr: module.ExpandedResource{}, + act: module.ActionRequest{ + Name: module.CreateAction, + Params: []byte("{"), + }, + wantErr: errors.ErrInvalid, + }, + { + title: "Create_InvalidParamsValue", + exr: module.ExpandedResource{}, + act: module.ActionRequest{ + Name: module.CreateAction, + Params: []byte("{}"), + }, + wantErr: errors.ErrInvalid, + }, + { + title: "Create_LongName", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:ABCDEFGHIJKLMNOPQRSTUVWXYZ:abcdefghijklmnopqrstuvwxyz", + Kind: "firehose", + Name: "abcdefghijklmnopqrstuvwxyz", + Project: "ABCDEFGHIJKLMNOPQRSTUVWXYZ", + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{}), + }, + }, + }, + act: module.ActionRequest{ + Name: module.CreateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 1, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:ABCDEFGHIJKLMNOPQRSTUVWXYZ:abcdefghijklmnopqrstuvwxyz", + Kind: "firehose", + Name: "abcdefghijklmnopqrstuvwxyz", + Project: "ABCDEFGHIJKLMNOPQRSTUVWXYZ", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "stopped": false, + "replicas": 1, + "namespace": "firehose", + "deployment_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghij-3801d0-firehose", + "chart_values": map[string]string{ + 
"chart_version": "0.1.3", + "image_repository": "gotocompany/firehose", + "image_pull_policy": "IfNotPresent", + "image_tag": "latest", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghij-3801d0-firehose-1", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghij-3801d0-firehose", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseCreate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + { + title: "Create_ValidRequest", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{}), + }, + }, + }, + act: module.ActionRequest{ + Name: module.CreateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 1, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "stopped": false, + "replicas": 1, + "namespace": "firehose", + "deployment_id": "foo-fh1-firehose", + "chart_values": map[string]string{ + + "chart_version": "0.1.3", + "image_repository": "gotocompany/firehose", + "image_pull_policy": "IfNotPresent", + "image_tag": "latest", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "foo-fh1-firehose", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseCreate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + { + title: "Create_ValidRequest_Bigquery", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{}), + }, + }, + }, + act: 
module.ActionRequest{ + Name: module.CreateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 1, + "env_variables": map[string]string{ + "SINK_TYPE": "BIGQUERY", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "stopped": false, + "replicas": 1, + "namespace": "bigquery-firehose", + "deployment_id": "foo-fh1-firehose", + "chart_values": map[string]string{ + + "chart_version": "0.1.3", + "image_repository": "gotocompany/firehose", + "image_pull_policy": "IfNotPresent", + "image_tag": "latest", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "BIGQUERY", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "bigquery-firehose", + ReleaseName: "foo-fh1-firehose", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseCreate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + { + // if kube resource has namespace key, all resources will be deployed to that namespace value + title: "Create_ValidRequest_OverrideNamespace", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{ + Configs: kube.Config{ + Namespace: "override-namespace", + }, + }), + }, + }, + }, + act: module.ActionRequest{ + Name: module.CreateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 1, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "stopped": false, + "replicas": 1, + "namespace": "override-namespace", + "deployment_id": "foo-fh1-firehose", + "chart_values": map[string]string{ + + "chart_version": "0.1.3", + "image_repository": "gotocompany/firehose", + "image_pull_policy": "IfNotPresent", + "image_tag": "latest", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "init_container": map[string]interface{}{"args": 
interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "override-namespace", + ReleaseName: "foo-fh1-firehose", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseCreate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + } + + for _, tt := range table { + t.Run(tt.title, func(t *testing.T) { + dr := &firehoseDriver{ + conf: defaultDriverConf, + timeNow: func() time.Time { return frozenTime }, + } + + dr.conf.Namespace = map[string]string{ + defaultKey: "firehose", + "BIGQUERY": "bigquery-firehose", + } + + got, err := dr.Plan(context.Background(), tt.exr, tt.act) + if tt.wantErr != nil { + require.Error(t, err) + assert.Nil(t, got) + assert.True(t, errors.Is(err, tt.wantErr), "wantErr=%v\ngotErr=%v", tt.wantErr, err) + } else { + assert.NoError(t, err) + require.NotNil(t, got) + + wantJSON := string(modules.MustJSON(tt.want)) + gotJSON := string(modules.MustJSON(got)) + assert.JSONEq(t, wantJSON, gotJSON) + } + }) + } +} diff --git a/modules/firehose/driver_plan_test.go b/modules/firehose/driver_plan_test.go new file mode 100644 index 00000000..b233c102 --- /dev/null +++ b/modules/firehose/driver_plan_test.go @@ -0,0 +1,372 @@ +package firehose + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" +) + +var frozenTime = time.Unix(1679668743, 0) + +func TestFirehoseDriver_Plan(t *testing.T) { + t.Parallel() + + table := []struct { + title string + exr module.ExpandedResource + act module.ActionRequest + want *resource.Resource + wantErr error + }{ + // reset action tests + { + title: "Reset_InValid", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "replicas": 1, + "deployment_id": "firehose-deployment-x", + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "foo", + ReleaseName: "bar", + }), + }, + }, + }, + act: module.ActionRequest{ + Name: ResetAction, + Params: modules.MustJSON(map[string]any{ + "to": "some_random", + }), + }, + wantErr: errors.ErrInvalid, + }, + { + title: "Reset_Valid", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "replicas": 1, + "deployment_id": "firehose-deployment-x", + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "firehose-deployment-x-1", + "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_OFFSET_RESET": "latest", + "SOURCE_KAFKA_BROKERS": 
"localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + }, + }, + }, + act: module.ActionRequest{ + Name: ResetAction, + Params: modules.MustJSON(map[string]any{ + "to": "earliest", + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "namespace": "firehose", + "replicas": 1, + "deployment_id": "firehose-deployment-x", + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "firehose-deployment-x-2", + "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_OFFSET_RESET": "earliest", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "reset_offset": "earliest", + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "stopped": false, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{ + stepReleaseStop, + stepReleaseUpdate, + }, + }), + NextSyncAt: &frozenTime, + }, + }, + }, + + // upgrade action tests + { + title: "Upgrade_Valid", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "stopped": false, + "replicas": 1, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "0.1.0", + "image_pull_policy": "IfNotPresent", + "image_tag": "latest", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + }, + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{}), + }, + }, + }, + act: module.ActionRequest{ + Name: UpgradeAction, + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "namespace": "firehose", + "stopped": false, + "replicas": 1, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + + "chart_version": "0.1.3", + "image_repository": "gotocompany/firehose", + "image_pull_policy": "IfNotPresent", + "image_tag": "latest", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + 
}, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseUpdate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + + // scale action tests + { + title: "Scale_Invalid_params", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "replicas": 1, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "0.1.0", + "image_pull_policy": "IfNotPresent", + "image_tag": "latest", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "foo", + ReleaseName: "bar", + }), + }, + }, + }, + act: module.ActionRequest{ + Name: ScaleAction, + Params: []byte("{}"), + }, + wantErr: errors.ErrInvalid, + }, + } + + for _, tt := range table { + t.Run(tt.title, func(t *testing.T) { + dr := &firehoseDriver{ + conf: defaultDriverConf, + timeNow: func() time.Time { return frozenTime }, + } + + dr.conf.Namespace = map[string]string{ + defaultKey: "firehose", + "BIGQUERY": "bigquery-firehose", + } + + got, err := dr.Plan(context.Background(), tt.exr, tt.act) + if tt.wantErr != nil { + require.Error(t, err) + assert.Nil(t, got) + assert.True(t, errors.Is(err, tt.wantErr), "wantErr=%v\ngotErr=%v", tt.wantErr, err) + } else { + assert.NoError(t, err) + require.NotNil(t, got) + + wantJSON := string(modules.MustJSON(tt.want)) + gotJSON := string(modules.MustJSON(got)) + assert.JSONEq(t, wantJSON, gotJSON) + } + }) + } +} + +func TestGetNewConsumerGroupID(t *testing.T) { + t.Parallel() + + table := []struct { + title string + deploymentID string + consumerGroupID string + want string + wantErr error + }{ + { + title: "invalid-group-id", + consumerGroupID: "test-firehose-xyz", + want: "test-firehose-xyz-1", + wantErr: nil, + }, + { + title: "valid-group-id", + consumerGroupID: "test-firehose-0999", + want: "test-firehose-1000", + wantErr: nil, + }, + } + + for _, tt := range table { + t.Run(tt.title, func(t *testing.T) { + got, err := getNewConsumerGroupID(tt.consumerGroupID) + if tt.wantErr != nil { + require.Error(t, err) + assert.Equal(t, "", got) + assert.ErrorIs(t, err, tt.wantErr) + } else { + assert.NoError(t, err) + require.NotNil(t, got) + assert.Equal(t, tt.want, got) + } + }) + } +} diff --git a/modules/firehose/driver_plan_update_test.go b/modules/firehose/driver_plan_update_test.go new file mode 100644 index 00000000..1628a709 --- /dev/null +++ b/modules/firehose/driver_plan_update_test.go @@ -0,0 +1,650 @@ +package firehose + +import ( + "context" + 
"testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" +) + +func TestFirehoseDriver_Plan_Update(t *testing.T) { + t.Parallel() + + table := []struct { + title string + exr module.ExpandedResource + act module.ActionRequest + want *resource.Resource + wantErr error + }{ + // update action tests + { + title: "Update_Valid", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "replicas": 1, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + }, + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{ + Tolerations: map[string][]kubernetes.Toleration{}, + }), + }, + }, + }, + act: module.ActionRequest{ + Name: module.UpdateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 10, + "env_variables": map[string]string{ + "SINK_TYPE": "HTTP", // the change being applied + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "namespace": "firehose", + "stopped": false, + "replicas": 10, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "HTTP", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseUpdate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + { + title: "Update_Valid with overriden namespace", + exr: 
module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "namespace": "overriden-namespace", + "replicas": 1, + "stopped": false, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "overriden-namespace", + ReleaseName: "bar", + }), + }, + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{ + Configs: kube.Config{ + Namespace: "overriden-namespace", + }, + Tolerations: map[string][]kubernetes.Toleration{}, + }), + }, + }, + }, + act: module.ActionRequest{ + Name: module.UpdateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 10, + "env_variables": map[string]string{ + "SINK_TYPE": "HTTP", // the change being applied + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "namespace": "overriden-namespace", + "stopped": false, + "replicas": 10, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "HTTP", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "overriden-namespace", + ReleaseName: "bar", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseUpdate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + // update override image repository + { + title: "Update_Override_Image_Repository", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "replicas": 1, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "newrepo/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + 
"SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + }, + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{ + Tolerations: map[string][]kubernetes.Toleration{}, + }), + }, + }, + }, + act: module.ActionRequest{ + Name: module.UpdateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 10, + "env_variables": map[string]string{ + "SINK_TYPE": "HTTP", // the change being applied + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "namespace": "firehose", + "stopped": false, + "replicas": 10, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "newrepo/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "HTTP", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseUpdate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + { + title: "Update_Resource_&_Limits", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "replicas": 1, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + }, + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{}), + }, + }, + }, + act: module.ActionRequest{ + Name: module.UpdateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 10, + "env_variables": map[string]string{ + "SINK_TYPE": "HTTP", // the change being applied + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + 
"SOURCE_KAFKA_TOPIC": "foo-log", + }, + "limits": map[string]any{ + "cpu": "500m", + "memory": "2048Mi", + }, + "requests": map[string]any{ + "cpu": "400m", + "memory": "1024Mi", + }, + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "namespace": "firehose", + "stopped": false, + "replicas": 10, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "HTTP", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "limits": map[string]any{ + "cpu": "500m", + "memory": "2048Mi", + }, + "requests": map[string]any{ + "cpu": "400m", + "memory": "1024Mi", + }, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseUpdate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + { + title: "Update_Running_Firehose_Namespace", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "replicas": 1, + "stopped": false, + "deployment_id": "firehose-deployment-x", + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + }, + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{}), + }, + }, + }, + act: module.ActionRequest{ + Name: module.UpdateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 10, + "stopped": false, + "env_variables": map[string]string{ + "SINK_TYPE": "BIGQUERY", // the change being applied + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + want: nil, + wantErr: errors.ErrInvalid.WithCausef(errCauseInvalidNamespaceUpdate), + }, + { + title: "Update_Stopped_Firehose_Namespace", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "namespace": "firehose", + "replicas": 1, + "stopped": true, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", 
+ "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + ReleaseName: "bar", + }), + }, + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{}), + }, + }, + }, + act: module.ActionRequest{ + Name: module.UpdateAction, + Params: modules.MustJSON(map[string]any{ + "replicas": 10, + "stopped": false, // shall allow starting at the time of update + "env_variables": map[string]string{ + "SINK_TYPE": "BIGQUERY", // the change being applied + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + want: &resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "namespace": "bigquery-firehose", + "stopped": false, + "replicas": 10, + "deployment_id": "firehose-deployment-x", + "chart_values": map[string]string{ + "image_repository": "gotocompany/firehose", + "chart_version": "1.0.0", + "image_pull_policy": "", + "image_tag": "1.0.0", + }, + "env_variables": map[string]string{ + "SINK_TYPE": "BIGQUERY", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "limits": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "requests": map[string]any{ + "cpu": "200m", + "memory": "512Mi", + }, + "init_container": map[string]interface{}{"args": interface{}(nil), "command": interface{}(nil), "enabled": false, "image_tag": "", "pull_policy": "", "repository": ""}, + }), + }, + State: resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: "firehose", // this is updated when Output is triggered + ReleaseName: "bar", + }), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseUpdate}, + }), + NextSyncAt: &frozenTime, + }, + }, + wantErr: nil, + }, + } + + for _, tt := range table { + t.Run(tt.title, func(t *testing.T) { + dr := &firehoseDriver{ + conf: defaultDriverConf, + timeNow: func() time.Time { return frozenTime }, + } + + dr.conf.Namespace = map[string]string{ + defaultKey: "firehose", + "BIGQUERY": "bigquery-firehose", + } + + got, err := dr.Plan(context.Background(), tt.exr, tt.act) + if tt.wantErr != nil { + require.Error(t, err) + assert.Nil(t, got) + assert.True(t, errors.Is(err, tt.wantErr), "wantErr=%v\ngotErr=%v", tt.wantErr, err) + } else { + assert.NoError(t, err) + require.NotNil(t, got) + + wantJSON := string(modules.MustJSON(tt.want)) + gotJSON := string(modules.MustJSON(got)) + assert.JSONEq(t, wantJSON, gotJSON) + } + }) + } +} diff --git a/modules/firehose/driver_sync.go b/modules/firehose/driver_sync.go new file mode 100644 index 00000000..75e846b5 --- /dev/null +++ b/modules/firehose/driver_sync.go @@ -0,0 +1,116 @@ +package firehose + +import ( + "context" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" +) + +func (fd *firehoseDriver) 
Sync(ctx context.Context, exr module.ExpandedResource) (*resource.State, error) { + modData, err := readTransientData(exr) + if err != nil { + return nil, err + } + + out, err := readOutputData(exr) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + conf, err := readConfig(exr.Resource, exr.Spec.Configs, fd.conf) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + var kubeOut kubernetes.Output + if err := json.Unmarshal(exr.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid kube state").WithCausef(err.Error()) + } + + finalState := resource.State{ + Status: resource.StatusPending, + Output: exr.Resource.State.Output, + } + + // pickup the next pending step if available. + if len(modData.PendingSteps) > 0 { + pendingStep := modData.PendingSteps[0] + modData.PendingSteps = modData.PendingSteps[1:] + + switch pendingStep { + case stepReleaseCreate, stepReleaseUpdate, stepReleaseStop: + // we want to stop the current deployment. we do this by setting + // replicas to 0. But this value will not be persisted to DB since + // config changes during Sync() are not saved. + if pendingStep == stepReleaseStop || conf.Stopped { + conf.Replicas = 0 + + if conf.Autoscaler != nil { + conf.Autoscaler.Spec.Pause(0) + } + } + + isCreate := pendingStep == stepReleaseCreate + if err := fd.releaseSync(ctx, exr.Resource, isCreate, *conf, kubeOut); err != nil { + return nil, err + } + + case stepKafkaReset: + if err := fd.consumerReset(ctx, *conf, kubeOut, modData.ResetOffsetTo, fd.conf.OffsetResetDelaySeconds); err != nil { + return nil, err + } + + default: + return nil, errors.ErrInternal.WithMsgf("unknown step: '%s'", pendingStep) + } + + // we have more pending states, so enqueue resource for another sync + // as soon as possible. + immediately := fd.timeNow() + finalState.NextSyncAt = &immediately + finalState.ModuleData = modules.MustJSON(modData) + + return &finalState, nil + } + + // even if the resource is in completed state, we check this time to + // see if the firehose is expected to be stopped by this time. 
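// If stop_time is still in the future, NextSyncAt is set to it so the worker re-queues the
// resource when it is due. If stop_time has already passed, the release is re-synced with zero
// replicas and NextSyncAt is cleared so no further sync is scheduled.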
+ finalState.NextSyncAt = conf.StopTime + if conf.StopTime != nil && conf.StopTime.Before(fd.timeNow()) { + conf.Replicas = 0 + conf.Stopped = true + if err := fd.releaseSync(ctx, exr.Resource, false, *conf, kubeOut); err != nil { + return nil, err + } + finalState.NextSyncAt = nil + } + + finalOut, err := fd.refreshOutput(ctx, exr.Resource, *conf, *out, kubeOut) + if err != nil { + return nil, err + } + finalState.Output = finalOut + + finalState.Status = resource.StatusCompleted + finalState.ModuleData = nil + return &finalState, nil +} + +func (fd *firehoseDriver) releaseSync(ctx context.Context, r resource.Resource, + isCreate bool, conf Config, kubeOut kubernetes.Output, +) error { + rc, err := fd.getHelmRelease(r, conf, kubeOut) + if err != nil { + return err + } + + if err := fd.kubeDeploy(ctx, isCreate, kubeOut.Configs, *rc); err != nil { + return errors.ErrInternal.WithCausef(err.Error()) + } + return nil +} diff --git a/modules/firehose/driver_sync_test.go b/modules/firehose/driver_sync_test.go new file mode 100644 index 00000000..bae17cef --- /dev/null +++ b/modules/firehose/driver_sync_test.go @@ -0,0 +1,333 @@ +package firehose + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/helm" + "github.com/goto/entropy/pkg/kube" +) + +func TestFirehoseDriver_Sync(t *testing.T) { + t.Parallel() + + table := []struct { + title string + kubeDeploy func(t *testing.T) kubeDeployFn + kubeGetPod func(t *testing.T) kubeGetPodFn + kubeGetDeployment func(t *testing.T) kubeGetDeploymentFn + + exr module.ExpandedResource + want *resource.State + wantErr error + }{ + { + title: "InvalidModuleData", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + State: resource.State{}, + }, + }, + wantErr: errors.ErrInternal, + }, + { + title: "InvalidOutput", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + State: resource.State{ + ModuleData: modules.MustJSON(transientData{}), + }, + }, + }, + wantErr: errors.ErrInternal, + }, + { + title: "InvalidConfig", + exr: module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + State: resource.State{ + Output: modules.MustJSON(Output{}), + ModuleData: modules.MustJSON(transientData{}), + }, + }, + }, + wantErr: errors.ErrInternal, + }, + { + title: "NoPendingStep", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{}), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: nil, + }), + }, "LOG", "firehose"), + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + assert.Equal(t, ns, "firehose") + assert.Equal(t, labels["app"], "firehose-foo-fh1") + return []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, nil + } + }, + 
kubeGetDeployment: func(t *testing.T) kubeGetDeploymentFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, name string) (kube.Deployment, error) { + assert.Equal(t, ns, "firehose") + return kube.Deployment{ + Name: "foo-1", + Paused: false, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 2, + Conditions: []map[string]string{}, + }, nil + } + }, + want: &resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + Namespace: "firehose", + Pods: []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, + Deployment: &kube.Deployment{ + Name: "foo-1", + Paused: false, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 2, + Conditions: []map[string]string{}, + }, + DesiredStatus: "RUNNING", + }), + ModuleData: nil, + }, + }, + { + title: "Sync_refresh_output_failure", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{}), + }, "LOG", "firehose"), + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + return nil, errors.New("failed") + } + }, + wantErr: errors.ErrInternal, + }, + { + title: "Sync_release_create_failure", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{}), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseCreate}, + }), + }, "LOG", "firehose"), + kubeDeploy: func(t *testing.T) kubeDeployFn { + t.Helper() + return func(ctx context.Context, isCreate bool, conf kube.Config, hc helm.ReleaseConfig) error { + return errors.New("failed") + } + }, + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + assert.Equal(t, ns, "firehose") + assert.Equal(t, labels["app"], "firehose-foo-fh1") + return []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, nil + } + }, + wantErr: errors.ErrInternal, + }, + { + title: "Sync_release_create_success", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{}), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseCreate}, + }), + }, "LOG", "firehose"), + kubeDeploy: func(t *testing.T) kubeDeployFn { + t.Helper() + return func(ctx context.Context, isCreate bool, conf kube.Config, hc helm.ReleaseConfig) error { + assert.True(t, isCreate) + assert.Equal(t, hc.Values["replicaCount"], 1) + return nil + } + }, + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + assert.Equal(t, ns, "firehose") + assert.Equal(t, labels["app"], "firehose-foo-fh1") + return []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, nil + } + }, + want: &resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{}), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{}, + }), + NextSyncAt: &frozenTime, + }, + }, + { + title: "Sync_release_stop_success", + exr: sampleResourceWithState(resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{}), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{stepReleaseStop}, + }), + }, "LOG", 
"firehose"), + kubeDeploy: func(t *testing.T) kubeDeployFn { + t.Helper() + return func(ctx context.Context, isCreate bool, conf kube.Config, hc helm.ReleaseConfig) error { + assert.False(t, isCreate) + assert.Equal(t, hc.Values["replicaCount"], 0) + return nil + } + }, + kubeGetPod: func(t *testing.T) kubeGetPodFn { + t.Helper() + return func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + assert.Equal(t, ns, "firehose") + assert.Equal(t, labels["app"], "firehose-foo-fh1") + return []kube.Pod{ + { + Name: "foo-1", + Containers: []string{"firehose"}, + }, + }, nil + } + }, + want: &resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{}), + ModuleData: modules.MustJSON(transientData{ + PendingSteps: []string{}, + }), + NextSyncAt: &frozenTime, + }, + }, + } + + for _, tt := range table { + t.Run(tt.title, func(t *testing.T) { + fd := &firehoseDriver{ + conf: defaultDriverConf, + timeNow: func() time.Time { return frozenTime }, + } + + if tt.kubeGetPod != nil { + fd.kubeGetPod = tt.kubeGetPod(t) + } + + if tt.kubeDeploy != nil { + fd.kubeDeploy = tt.kubeDeploy(t) + } + + if tt.kubeGetDeployment != nil { + fd.kubeGetDeployment = tt.kubeGetDeployment(t) + } + + got, err := fd.Sync(context.Background(), tt.exr) + if tt.wantErr != nil { + require.Error(t, err) + assert.True(t, errors.Is(err, tt.wantErr), "wantErr=%v\ngotErr=%v", tt.wantErr, err) + } else { + assert.NoError(t, err) + require.NotNil(t, got) + + wantJSON := string(modules.MustJSON(tt.want)) + gotJSON := string(modules.MustJSON(got)) + assert.JSONEq(t, wantJSON, gotJSON) + } + }) + } +} + +func sampleResourceWithState(state resource.State, sinkType, namespace string) module.ExpandedResource { + return module.ExpandedResource{ + Resource: resource.Resource{ + URN: "urn:goto:entropy:foo:fh1", + Kind: "firehose", + Name: "fh1", + Project: "foo", + Spec: resource.Spec{ + Configs: modules.MustJSON(map[string]any{ + "replicas": 1, + "namespace": namespace, + "deployment_id": "firehose-foo-fh1", + "telegraf": map[string]any{ + "enabled": false, + }, + "chart_values": map[string]string{ + "chart_version": "0.1.3", + "image_pull_policy": "IfNotPresent", + "image_tag": "latest", + }, + "env_variables": map[string]string{ + "SINK_TYPE": sinkType, + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + }), + }, + State: state, + }, + Dependencies: map[string]module.ResolvedDependency{ + "kube_cluster": { + Kind: "kubernetes", + Output: modules.MustJSON(kubernetes.Output{}), + }, + }, + } +} diff --git a/modules/firehose/driver_test.go b/modules/firehose/driver_test.go new file mode 100644 index 00000000..0f067234 --- /dev/null +++ b/modules/firehose/driver_test.go @@ -0,0 +1,700 @@ +package firehose + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/helm" +) + +func TestFirehoseDriver(t *testing.T) { + t.Parallel() + + table := []struct { + title string + res resource.Resource + kubeOutput kubernetes.Output + want *helm.ReleaseConfig + wantErr error + }{ + { + title: "LOG_Sink", + res: 
resource.Resource{ + URN: "orn:entropy:firehose:project-1:resource-1-firehose", + Kind: "firehose", + Name: "resource-1", + Project: "project-1", + Labels: map[string]string{ + "team": "team-1", + }, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + UpdatedBy: "john.doe@goto.com", + CreatedBy: "john.doe@goto.com", + Spec: resource.Spec{ + Configs: []byte(`{ + "env_variables": { + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "foo-log" + }, + "replicas": 1 + }`), + Dependencies: map[string]string{}, + }, + State: resource.State{ + Status: resource.StatusPending, + Output: nil, + }, + }, + kubeOutput: kubernetes.Output{ + Tolerations: map[string][]kubernetes.Toleration{ + "firehose_LOG": { + { + Key: "key1", + Operator: "Equal", + Value: "value1", + Effect: "NoSchedule", + }, + }, + "firehose_BIGQUERY": { + { + Key: "key2", + Operator: "Equal", + Value: "value2", + Effect: "NoSchedule", + }, + }, + }, + }, + want: &helm.ReleaseConfig{ + Name: "project-1-resource-1-firehose", + Repository: "https://goto.github.io/charts/", + Chart: "firehose", + Version: "0.1.13", + Namespace: "namespace-1", + Timeout: 60, + Wait: true, + ForceUpdate: true, + Values: map[string]any{ + "firehose": map[string]any{ + "config": map[string]any{ + "DEFAULT_KEY_IN_FIREHOSE_MODULE_1": "default-key-in-firehose-module-value_1", + "DEFAULT_KEY_IN_FIREHOSE_MODULE_2": "default-key-in-firehose-module-value_2", + "DLQ_GCS_CREDENTIAL_PATH": "/etc/secret/dlq_gcs_auth.json", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar", + "SINK_BIGQUERY_CREDENTIAL_PATH": "/etc/secret/bigquery_auth.json", + "SINK_BIGTABLE_CREDENTIAL_PATH": "/etc/secret/gcs_auth.json", + "SINK_BLOB_GCS_CREDENTIAL_PATH": "/etc/secret/gcs_auth.json", + "SINK_TYPE": "LOG", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz", + "SOURCE_KAFKA_TOPIC": "foo-log", + }, + "image": map[string]any{ + "pullPolicy": "IfNotPresent", + "repository": "gotocompany/firehose", + "tag": "0.8.1", + }, + "resources": map[string]any{ + "limits": map[string]any{ + "cpu": "6000m", + "memory": "6000Mi", + }, + "requests": map[string]any{ + "cpu": "600m", + "memory": "2500Mi", + }, + }, + }, + "init-firehose": map[string]any{ + "enabled": true, + "image": map[string]any{ + "repository": "busybox", + "pullPolicy": "IfNotPresent", + "tag": "latest", + }, + "command": []string{"cmd1", "--a"}, + "args": []string{"arg1", "arg2"}, + }, + "labels": map[string]string{ + "deployment": "project-1-resource-1-firehose", + "team": "team-1", + "orchestrator": "entropy", + }, + "mountSecrets": []map[string]string{ + { + "key": "gcs_credential", + "path": "gcs_auth.json", + "value": "gcs-credential", + }, + { + "key": "dlq_gcs_credential", + "path": "dlq_gcs_auth.json", + "value": "dlq-gcs-credential", + }, + { + "key": "bigquery_credential", + "path": "bigquery_auth.json", + "value": "big-query-credential", + }, + }, + "nodeAffinityMatchExpressions": map[string]any{ + "preferredDuringSchedulingIgnoredDuringExecution": []kubernetes.WeightedPreference{ + { + Weight: 1, + Preference: []kubernetes.Preference{ + { + Key: "another-node-label-key", + Operator: "In", + Values: []string{"another-node-label-value"}, + }, + }, + }, + }, + "requiredDuringSchedulingIgnoredDuringExecution": []kubernetes.Preference{ + { + Key: "topology.kubernetes.io/zone", + Operator: "In", + Values: []string{"antarctica-east1", "antarctica-west1"}, + }, + 
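// Note: the affinity rules above come from the driver configuration (see firehoseDriverConf
// below), and the tolerations further down in these values are taken from the kube_cluster
// output, selected by sink type in this fixture; neither is part of the firehose spec itself.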
}, + }, + "replicaCount": 1, + "telegraf": map[string]any{ + "enabled": true, + "image": map[string]string{ + "pullPolicy": "IfNotPresent", + "repository": "telegraf", + "tag": "1.18.0-alpine", + }, + "config": map[string]any{ + "output": map[string]any{ + "prometheus_remote_write": map[string]any{ + "enabled": true, + "url": "http://goto.namespace-1.com", + }, + }, + "additional_global_tags": map[string]string{ + "app": "orn:entropy:firehose:project-1:resource-1-firehose", + }, + }, + }, + "tolerations": []map[string]any{ + { + "key": "key1", + "operator": "Equal", + "value": "value1", + "effect": "NoSchedule", + }, + }, + }, + }, + wantErr: nil, + }, + { + title: "BIGQUERY_Sink", + res: resource.Resource{ + URN: "orn:entropy:firehose:project-1:resource-2-firehose", + Kind: "firehose", + Name: "resource-2", + Project: "project-1", + Labels: map[string]string{ + "team": "team-2", + }, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + UpdatedBy: "john.doe2@goto.com", + CreatedBy: "john.doe2@goto.com", + Spec: resource.Spec{ + Configs: []byte(`{ + "env_variables": { + "SINK_TYPE": "BIGQUERY", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar-2", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz-2", + "SOURCE_KAFKA_BROKERS": "localhost:9093", + "SOURCE_KAFKA_TOPIC": "foo-log-2" + }, + "replicas": 2 + }`), + Dependencies: map[string]string{}, + }, + State: resource.State{ + Status: resource.StatusPending, + Output: nil, + }, + }, + kubeOutput: kubernetes.Output{ + Tolerations: map[string][]kubernetes.Toleration{ + "firehose_LOG": { + { + Key: "key1", + Operator: "Equal", + Value: "value1", + Effect: "NoSchedule", + }, + }, + "firehose_BIGQUERY": { + { + Key: "key2", + Operator: "Equal", + Value: "value2", + Effect: "NoSchedule", + }, + }, + }, + }, + want: &helm.ReleaseConfig{ + Name: "project-1-resource-2-firehose", + Repository: "https://goto.github.io/charts/", + Chart: "firehose", + Version: "0.1.13", + Namespace: "namespace-1", + Timeout: 60, + Wait: true, + ForceUpdate: true, + Values: map[string]any{ + "firehose": map[string]any{ + "config": map[string]any{ + "DEFAULT_KEY_IN_FIREHOSE_MODULE_1": "default-key-in-firehose-module-value_1", + "DEFAULT_KEY_IN_FIREHOSE_MODULE_2": "default-key-in-firehose-module-value_2", + "DLQ_GCS_CREDENTIAL_PATH": "/etc/secret/dlq_gcs_auth.json", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar-2", + "SINK_BIGQUERY_CREDENTIAL_PATH": "/etc/secret/bigquery_auth.json", + "SINK_BIGTABLE_CREDENTIAL_PATH": "/etc/secret/gcs_auth.json", + "SINK_BLOB_GCS_CREDENTIAL_PATH": "/etc/secret/gcs_auth.json", + "SINK_TYPE": "BIGQUERY", + "SOURCE_KAFKA_BROKERS": "localhost:9093", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz-2", + "SOURCE_KAFKA_TOPIC": "foo-log-2", + }, + "image": map[string]any{ + "pullPolicy": "IfNotPresent", + "repository": "gotocompany/firehose", + "tag": "0.8.1", + }, + "resources": map[string]any{ + "limits": map[string]any{ + "cpu": "6000m", + "memory": "20000Mi", + }, + "requests": map[string]any{ + "cpu": "300m", + "memory": "2000Mi", + }, + }, + }, + "init-firehose": map[string]any{ + "enabled": true, + "image": map[string]any{ + "repository": "busybox", + "pullPolicy": "IfNotPresent", + "tag": "latest", + }, + "command": []string{"cmd1", "--a"}, + "args": []string{"arg1", "arg2"}, + }, + "labels": map[string]string{ + "deployment": "project-1-resource-2-firehose", + "team": "team-2", + "orchestrator": "entropy", + }, + "mountSecrets": []map[string]string{ + { + "key": "gcs_credential", + "path": "gcs_auth.json", + "value": "gcs-credential", + }, + { + "key": 
"dlq_gcs_credential", + "path": "dlq_gcs_auth.json", + "value": "dlq-gcs-credential", + }, + { + "key": "bigquery_credential", + "path": "bigquery_auth.json", + "value": "big-query-credential", + }, + }, + "nodeAffinityMatchExpressions": map[string]any{ + "preferredDuringSchedulingIgnoredDuringExecution": []kubernetes.WeightedPreference{ + { + Weight: 1, + Preference: []kubernetes.Preference{ + { + Key: "another-node-label-key", + Operator: "In", + Values: []string{"another-node-label-value"}, + }, + }, + }, + }, + "requiredDuringSchedulingIgnoredDuringExecution": []kubernetes.Preference{ + { + Key: "topology.kubernetes.io/zone", + Operator: "In", + Values: []string{"antarctica-east1", "antarctica-west1"}, + }, + }, + }, + "replicaCount": 2, + "telegraf": map[string]any{ + "enabled": true, + "image": map[string]string{ + "pullPolicy": "IfNotPresent", + "repository": "telegraf", + "tag": "1.18.0-alpine", + }, + "config": map[string]any{ + "output": map[string]any{ + "prometheus_remote_write": map[string]any{ + "enabled": true, + "url": "http://goto.namespace-1.com", + }, + }, + "additional_global_tags": map[string]string{ + "app": "orn:entropy:firehose:project-1:resource-2-firehose", + }, + }, + }, + "tolerations": []map[string]any{ + { + "key": "key2", + "operator": "Equal", + "value": "value2", + "effect": "NoSchedule", + }, + }, + }, + }, + wantErr: nil, + }, + { + title: "BLOB_Sink", + res: resource.Resource{ + URN: "orn:entropy:firehose:project-1:resource-3-firehose", + Kind: "firehose", + Name: "resource-3", + Project: "project-1", + Labels: map[string]string{ + "team": "team-3", + }, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + UpdatedBy: "john.doe3@goto.com", + CreatedBy: "john.doe3@goto.com", + Spec: resource.Spec{ + Configs: []byte(`{ + "env_variables": { + "SINK_TYPE": "BLOB", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar-3", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz-3", + "SOURCE_KAFKA_BROKERS": "localhost:9094", + "SOURCE_KAFKA_TOPIC": "foo-log-3" + }, + "replicas": 3 + }`), + Dependencies: map[string]string{}, + }, + State: resource.State{ + Status: resource.StatusPending, + Output: nil, + }, + }, + kubeOutput: kubernetes.Output{ + Tolerations: map[string][]kubernetes.Toleration{ + "firehose_LOG": { + { + Key: "key1", + Operator: "Equal", + Value: "value1", + Effect: "NoSchedule", + }, + }, + "firehose_BIGQUERY": { + { + Key: "key2", + Operator: "Equal", + Value: "value2", + Effect: "NoSchedule", + }, + }, + "firehose_BLOB": { + { + Key: "key3", + Operator: "Equal", + Value: "value3", + Effect: "NoSchedule", + }, + }, + }, + }, + want: &helm.ReleaseConfig{ + Name: "project-1-resource-3-firehose", + Repository: "https://goto.github.io/charts/", + Chart: "firehose", + Version: "0.1.13", + Namespace: "namespace-1", + Timeout: 60, + Wait: true, + ForceUpdate: true, + Values: map[string]any{ + "firehose": map[string]any{ + "config": map[string]any{ + "DEFAULT_KEY_IN_FIREHOSE_MODULE_1": "default-key-in-firehose-module-value_1", + "DEFAULT_KEY_IN_FIREHOSE_MODULE_2": "default-key-in-firehose-module-value_2", + "DLQ_GCS_CREDENTIAL_PATH": "/etc/secret/dlq_gcs_auth.json", + "INPUT_SCHEMA_PROTO_CLASS": "com.foo.Bar-3", + "SINK_BIGQUERY_CREDENTIAL_PATH": "/etc/secret/bigquery_auth.json", + "SINK_BIGTABLE_CREDENTIAL_PATH": "/etc/secret/gcs_auth.json", + "SINK_BLOB_GCS_CREDENTIAL_PATH": "/etc/secret/gcs_auth.json", + "SINK_TYPE": "BLOB", + "SOURCE_KAFKA_BROKERS": "localhost:9094", + "SOURCE_KAFKA_CONSUMER_GROUP_ID": "foo-bar-baz-3", + "SOURCE_KAFKA_TOPIC": "foo-log-3", + }, + 
"image": map[string]any{ + "pullPolicy": "IfNotPresent", + "repository": "gotocompany/firehose", + "tag": "0.8.1", + }, + "resources": map[string]any{ + "limits": map[string]any{ + "cpu": "6000m", + "memory": "20000Mi", + }, + "requests": map[string]any{ + "cpu": "300m", + "memory": "2000Mi", + }, + }, + }, + "init-firehose": map[string]any{ + "enabled": true, + "image": map[string]any{ + "repository": "busybox", + "pullPolicy": "IfNotPresent", + "tag": "latest", + }, + "command": []string{"cmd1", "--a"}, + "args": []string{"arg1", "arg2"}, + }, + "labels": map[string]string{ + "deployment": "project-1-resource-3-firehose", + "team": "team-3", + "orchestrator": "entropy", + }, + "mountSecrets": []map[string]string{ + { + "key": "gcs_credential", + "path": "gcs_auth.json", + "value": "gcs-credential", + }, + { + "key": "dlq_gcs_credential", + "path": "dlq_gcs_auth.json", + "value": "dlq-gcs-credential", + }, + { + "key": "bigquery_credential", + "path": "bigquery_auth.json", + "value": "big-query-credential", + }, + }, + "nodeAffinityMatchExpressions": map[string]any{ + "preferredDuringSchedulingIgnoredDuringExecution": []kubernetes.WeightedPreference{ + { + Weight: 1, + Preference: []kubernetes.Preference{ + { + Key: "another-node-label-key", + Operator: "In", + Values: []string{"another-node-label-value"}, + }, + }, + }, + }, + "requiredDuringSchedulingIgnoredDuringExecution": []kubernetes.Preference{ + { + Key: "topology.kubernetes.io/zone", + Operator: "In", + Values: []string{"antarctica-east1", "antarctica-west1"}, + }, + }, + }, + "replicaCount": 3, + "telegraf": map[string]any{ + "enabled": true, + "image": map[string]string{ + "pullPolicy": "IfNotPresent", + "repository": "telegraf", + "tag": "1.18.0-alpine", + }, + "config": map[string]any{ + "output": map[string]any{ + "prometheus_remote_write": map[string]any{ + "enabled": true, + "url": "http://goto.namespace-1.com", + }, + }, + "additional_global_tags": map[string]string{ + "app": "orn:entropy:firehose:project-1:resource-3-firehose", + }, + }, + }, + "tolerations": []map[string]any{ + { + "key": "key3", + "operator": "Equal", + "value": "value3", + "effect": "NoSchedule", + }, + }, + }, + }, + wantErr: nil, + }, + } + + for _, tt := range table { + t.Run(tt.title, func(t *testing.T) { + fd := &firehoseDriver{ + conf: firehoseDriverConf(), + timeNow: func() time.Time { return frozenTime }, + } + + conf, _ := readConfig(tt.res, tt.res.Spec.Configs, fd.conf) + chartVals, _ := mergeChartValues(&fd.conf.ChartValues, conf.ChartValues) + + conf.Telegraf = fd.conf.Telegraf + conf.ChartValues = chartVals + + got, err := fd.getHelmRelease(tt.res, *conf, tt.kubeOutput) + if tt.wantErr != nil { + require.Error(t, err) + assert.True(t, errors.Is(err, tt.wantErr), "wantErr=%v\ngotErr=%v", tt.wantErr, err) + } else { + assert.NoError(t, err) + require.NotNil(t, got) + + wantJSON := string(modules.MustJSON(tt.want)) + gotJSON := string(modules.MustJSON(got)) + assert.JSONEq(t, wantJSON, gotJSON) + } + }) + } +} + +func firehoseDriverConf() driverConf { + return driverConf{ + KubeDeployTimeout: 60, + NodeAffinityMatchExpressions: kubernetes.NodeAffinityMatchExpressions{ + RequiredDuringSchedulingIgnoredDuringExecution: []kubernetes.Preference{ + { + Key: "topology.kubernetes.io/zone", + Operator: "In", + Values: []string{"antarctica-east1", "antarctica-west1"}, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []kubernetes.WeightedPreference{ + { + Weight: 1, + Preference: []kubernetes.Preference{ + { + Key: 
"another-node-label-key", + Operator: "In", + Values: []string{"another-node-label-value"}, + }, + }, + }, + }, + }, + EnvVariables: map[string]string{ + "DEFAULT_KEY_IN_FIREHOSE_MODULE_1": "default-key-in-firehose-module-value_1", + "DEFAULT_KEY_IN_FIREHOSE_MODULE_2": "default-key-in-firehose-module-value_2", + }, + ChartValues: ChartValues{ + ChartVersion: "0.1.13", + ImageRepository: "gotocompany/firehose", + ImageTag: "0.8.1", + ImagePullPolicy: "IfNotPresent", + }, + BigQuerySinkCredential: "big-query-credential", + GCSSinkCredential: "gcs-credential", + DLQGCSSinkCredential: "dlq-gcs-credential", + InitContainer: InitContainer{ + Args: []string{"arg1", "arg2"}, + Command: []string{"cmd1", "--a"}, + Enabled: true, + ImageTag: "latest", + PullPolicy: "IfNotPresent", + Repository: "busybox", + }, + Labels: map[string]string{ + "team": "{{.team}}", + }, + Namespace: map[string]string{ + "default": "namespace-1", + }, + RequestsAndLimits: map[string]RequestsAndLimits{ + "BIGQUERY": { + Limits: UsageSpec{ + CPU: "6000m", + Memory: "20000Mi", + }, + Requests: UsageSpec{ + CPU: "300m", + Memory: "2000Mi", + }, + }, + "BLOB": { + Limits: UsageSpec{ + CPU: "6000m", + Memory: "20000Mi", + }, + Requests: UsageSpec{ + CPU: "300m", + Memory: "2000Mi", + }, + }, + "default": { + Limits: UsageSpec{ + CPU: "6000m", + Memory: "6000Mi", + }, + Requests: UsageSpec{ + CPU: "600m", + Memory: "2500Mi", + }, + }, + }, + Telegraf: &Telegraf{ + Enabled: true, + Image: map[string]any{ + "pullPolicy": "IfNotPresent", + "repository": "telegraf", + "tag": "1.18.0-alpine", + }, + Config: TelegrafConf{ + Output: map[string]any{ + "prometheus_remote_write": map[string]any{ + "enabled": true, + "url": "http://goto.{{ .namespace }}.com", + }, + }, + AdditionalGlobalTags: map[string]string{ + "app": "{{ .urn }}", + }, + }, + }, + } +} diff --git a/modules/firehose/kafka/consumer.go b/modules/firehose/kafka/consumer.go deleted file mode 100644 index 4a6a0520..00000000 --- a/modules/firehose/kafka/consumer.go +++ /dev/null @@ -1,61 +0,0 @@ -package kafka - -import ( - "context" - - "github.com/odpf/entropy/pkg/kube" -) - -const ( - kafkaImage = "bitnami/kafka:2.0.0" - retries = 6 -) - -type ConsumerGroupManager struct { - brokers string - kube *kube.Client - namespace string -} - -func NewConsumerGroupManager(brokers string, kube *kube.Client, namespace string) *ConsumerGroupManager { - return &ConsumerGroupManager{ - brokers: brokers, - kube: kube, - namespace: namespace, - } -} - -func (k ConsumerGroupManager) ResetOffsetToDatetime(ctx context.Context, consumerID string, datetime string) error { - return k.kube.RunJob(ctx, k.namespace, - getJobName(consumerID), - kafkaImage, - append(k.getDefaultCMD(consumerID), "--to-datetime", datetime), - retries, - ) -} - -func (k ConsumerGroupManager) ResetOffsetToLatest(ctx context.Context, consumerID string) error { - return k.kube.RunJob(ctx, k.namespace, - getJobName(consumerID), - kafkaImage, - append(k.getDefaultCMD(consumerID), "--to-latest"), - retries, - ) -} - -func (k ConsumerGroupManager) ResetOffsetToEarliest(ctx context.Context, consumerID string) error { - return k.kube.RunJob(ctx, k.namespace, - getJobName(consumerID), - kafkaImage, - append(k.getDefaultCMD(consumerID), "--to-earliest"), - retries, - ) -} - -func (k ConsumerGroupManager) getDefaultCMD(consumerID string) []string { - return []string{"kafka-consumer-groups.sh", "--bootstrap-server", k.brokers, "--group", consumerID, "--reset-offsets", "--execute", "--all-topics"} -} - -func 
getJobName(consumerID string) string { - return consumerID + "-reset" -} diff --git a/modules/firehose/keda.go b/modules/firehose/keda.go new file mode 100644 index 00000000..64cd5c46 --- /dev/null +++ b/modules/firehose/keda.go @@ -0,0 +1,275 @@ +package firehose + +import ( + "fmt" + "maps" + "strings" + + "github.com/goto/entropy/modules" + "github.com/goto/entropy/pkg/errors" +) + +type Scaler string + +const ( + KAFKA Scaler = "kafka" + PROMETHEUS Scaler = "prometheus" +) + +const ( + KedaPausedAnnotationKey = "autoscaling.keda.sh/paused" + KedaPausedReplicaAnnotationKey = "autoscaling.keda.sh/paused-replicas" + + KedaKafkaMetadataBootstrapServersKey = "bootstrapServers" + KedaKafkaMetadataTopicKey = "topic" + KedaKafkaMetadataConsumerGroupKey = "consumerGroup" + + KafkaTopicDelimiter = "|" +) + +type Keda struct { + Paused bool `json:"paused,omitempty"` + PausedWithReplica bool `json:"paused_with_replica,omitempty"` + PausedReplica int `json:"paused_replica,omitempty"` + MinReplicas int `json:"min_replicas"` + MaxReplicas int `json:"max_replicas"` + PollingInterval int `json:"polling_interval,omitempty"` + CooldownPeriod int `json:"cooldown_period,omitempty"` + Triggers map[string]Trigger `json:"triggers,omitempty"` + RestoreToOriginalReplica bool `json:"restore_to_original_replica_count,omitempty"` + Fallback *Fallback `json:"fallback,omitempty"` + HPA *HorizontalPodAutoscaler `json:"hpa,omitempty"` +} + +type Trigger struct { + Type Scaler `json:"type,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + AuthenticationRef AuthenticationRef `json:"authentication_ref,omitempty"` +} + +type AuthenticationRef struct { + Name string `json:"name,omitempty" validate:"required"` + Kind string `json:"kind,omitempty"` +} + +type Fallback struct { + Behavior string `json:"behavior,omitempty"` + Replicas int `json:"replicas,omitempty"` + FailureThreshold int `json:"failure_threshold,omitempty"` +} + +type HorizontalPodAutoscaler struct { + ScaleDown ScaleBehaviour `json:"scale_down,omitempty"` + ScaleUp ScaleBehaviour `json:"scale_up,omitempty"` +} + +type ScaleBehaviour struct { + Policies []Policy `json:"policies,omitempty"` + StabilizationWindowSeconds *int `json:"stabilization_window_seconds,omitempty"` + Tolerance *float32 `json:"tolerance,omitempty"` +} + +type Policy struct { + Type string `json:"type,omitempty"` + Value float32 `json:"value,omitempty"` + PeriodSeconds int `json:"period_seconds,omitempty"` +} + +func (keda *Keda) ReadConfig(cfg Config, driverCfg driverConf) error { + kedaConfig := Keda{} + + defaultConfig, ok := driverCfg.Autoscaler.Keda[defaultKey] + if ok { + kedaConfig = defaultConfig + } + + sinkType := cfg.EnvVariables[confSinkType] + SinkConfig, ok := driverCfg.Autoscaler.Keda[sinkType] + if ok { + kedaConfig = SinkConfig + } + + kedaConfig.updateTriggersMetadata(cfg.EnvVariables) + + kedaConfig.MinReplicas = keda.MinReplicas + kedaConfig.MaxReplicas = keda.MaxReplicas + + if keda.Fallback != nil && keda.Fallback.Behavior != "" { + kedaConfig.Fallback = keda.Fallback + } + + if keda.HPA != nil { + kedaConfig.HPA = keda.HPA + } + + kedaConfig.Paused = keda.Paused + kedaConfig.PausedWithReplica = keda.PausedWithReplica + kedaConfig.PausedReplica = keda.PausedReplica + + *keda = kedaConfig + return nil +} + +func (keda *Keda) Pause(replica ...int) { + if len(replica) == 0 { + keda.Paused = true + } + if len(replica) > 0 { + keda.PausedWithReplica = true + keda.PausedReplica = replica[0] + } +} + +func (keda *Keda) Resume() 
{ + keda.Paused = false + keda.PausedWithReplica = false +} + +func (keda *Keda) GetHelmValues(cfg Config) (map[string]any, error) { + annotations := make(map[string]string) + if keda.Paused { + annotations[KedaPausedAnnotationKey] = "true" + } + if keda.PausedWithReplica { + annotations[KedaPausedReplicaAnnotationKey] = fmt.Sprint(keda.PausedReplica) + } + + var firehoseConfigs = map[string]string{ + "namespace": cfg.Namespace, + "replicas": fmt.Sprint(cfg.Replicas), + } + var triggers []map[string]any + for _, trigger := range keda.Triggers { + renderedMetadata, err := renderTpl(trigger.Metadata, modules.CloneAndMergeMaps(firehoseConfigs, cfg.EnvVariables)) + if err != nil { + return nil, err + } + trigger.Metadata = renderedMetadata + + topicMetadata, topicMetadataExists := trigger.Metadata[KedaKafkaMetadataTopicKey] + if trigger.Type == KAFKA && + topicMetadataExists && + strings.Contains(topicMetadata, KafkaTopicDelimiter) { + topics := strings.Split(topicMetadata, KafkaTopicDelimiter) + for _, topic := range topics { + metadata := maps.Clone(trigger.Metadata) + metadata[KedaKafkaMetadataTopicKey] = topic + triggers = append(triggers, map[string]any{ + "type": trigger.Type, + "metadata": metadata, + "authenticationRef": map[string]any{ + "name": trigger.AuthenticationRef.Name, + "kind": trigger.AuthenticationRef.Kind, + }, + }) + } + continue + } + + triggers = append(triggers, map[string]any{ + "type": trigger.Type, + "metadata": trigger.Metadata, + "authenticationRef": map[string]any{ + "name": trigger.AuthenticationRef.Name, + "kind": trigger.AuthenticationRef.Kind, + }, + }) + } + + var hpa map[string]any + if keda.HPA != nil { + var scaleUpPolicy []map[string]any + for _, policy := range keda.HPA.ScaleUp.Policies { + scaleUpPolicy = append(scaleUpPolicy, map[string]any{ + "type": policy.Type, + "value": policy.Value, + "periodSeconds": policy.PeriodSeconds, + }) + } + + var scaleDownPolicy []map[string]any + for _, policy := range keda.HPA.ScaleDown.Policies { + scaleDownPolicy = append(scaleDownPolicy, map[string]any{ + "type": policy.Type, + "value": policy.Value, + "periodSeconds": policy.PeriodSeconds, + }) + } + + hpa = map[string]any{ + "scaleUp": map[string]any{ + "policies": scaleUpPolicy, + "stabilizationWindowSeconds": keda.HPA.ScaleUp.StabilizationWindowSeconds, + "tolerance": keda.HPA.ScaleUp.Tolerance, + }, + "scaleDown": map[string]any{ + "policies": scaleDownPolicy, + "stabilizationWindowSeconds": keda.HPA.ScaleDown.StabilizationWindowSeconds, + "tolerance": keda.HPA.ScaleDown.Tolerance, + }, + } + } + + var fallback map[string]any + if keda.Fallback != nil { + fallback = map[string]any{ + "behavior": keda.Fallback.Behavior, + "failureThreshold": keda.Fallback.FailureThreshold, + "replicas": keda.Fallback.Replicas, + } + } + + return map[string]any{ + "annotations": annotations, + "maxReplicaCount": keda.MaxReplicas, + "minReplicaCount": keda.MinReplicas, + "pollingInterval": keda.PollingInterval, + "cooldownPeriod": keda.CooldownPeriod, + "restoreToOriginalReplicaCount": keda.RestoreToOriginalReplica, + "fallback": fallback, + "triggers": triggers, + "hpa": hpa, + }, nil +} + +func (keda *Keda) updateTriggersMetadata(cfg map[string]string) error { + for key, trigger := range keda.Triggers { + switch trigger.Type { + case KAFKA: + if _, ok := cfg[confKeyConsumerID]; ok { + trigger.Metadata[KedaKafkaMetadataConsumerGroupKey] = cfg[confKeyConsumerID] + } + if _, ok := cfg[confKeyKafkaTopic]; ok { + trigger.Metadata[KedaKafkaMetadataTopicKey] = cfg[confKeyKafkaTopic] 
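// Consumer group, topic, and broker values taken from the firehose env variables overwrite any
// statically configured trigger metadata, so the scaler follows the same Kafka consumer as the
// deployment it scales.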
+ } + if _, ok := cfg[confKeyKafkaBrokers]; ok { + trigger.Metadata[KedaKafkaMetadataBootstrapServersKey] = cfg[confKeyKafkaBrokers] + } + } + keda.Triggers[key] = trigger + } + return nil +} + +func (keda *Keda) Validate() error { + if keda.MinReplicas == 0 && keda.MaxReplicas == 0 { + return errors.ErrInvalid.WithMsgf("min_replicas and max_replicas must be set when autoscaler is enabled") + } + + if keda.MinReplicas < 0 { + return errors.ErrInvalid.WithMsgf("min_replicas must be greater than or equal to 0") + } + + if keda.MaxReplicas < 1 { + return errors.ErrInvalid.WithMsgf("max_replicas must be greater than or equal to 1") + } + + if keda.MinReplicas > keda.MaxReplicas { + return errors.ErrInvalid.WithMsgf("min_replicas must be less than or equal to max_replicas") + } + + if len(keda.Triggers) == 0 { + return errors.ErrInvalid.WithMsgf("at least one trigger must be defined when autoscaler is enabled") + } + return nil +} diff --git a/modules/firehose/keda_test.go b/modules/firehose/keda_test.go new file mode 100644 index 00000000..b1540e8c --- /dev/null +++ b/modules/firehose/keda_test.go @@ -0,0 +1,129 @@ +package firehose + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestKeda_Validate(t *testing.T) { + tests := []struct { + name string + keda Keda + wantErr bool + errMsg string + }{ + { + name: "Empty config should error", + keda: Keda{}, + wantErr: true, + errMsg: "min_replicas and max_replicas must be set when autoscaler is enabled", + }, + { + name: "Invalid min replicas", + keda: Keda{ + MinReplicas: -1, + MaxReplicas: 5, + Triggers: map[string]Trigger{ + "test": {Type: KAFKA}, + }, + }, + wantErr: true, + errMsg: "min_replicas must be greater than or equal to 0", + }, + { + name: "Invalid max replicas", + keda: Keda{ + MinReplicas: 1, + MaxReplicas: 0, + Triggers: map[string]Trigger{ + "test": {Type: KAFKA}, + }, + }, + wantErr: true, + errMsg: "max_replicas must be greater than or equal to 1", + }, + { + name: "Min greater than max", + keda: Keda{ + MinReplicas: 5, + MaxReplicas: 3, + Triggers: map[string]Trigger{ + "test": {Type: KAFKA}, + }, + }, + wantErr: true, + errMsg: "min_replicas must be less than or equal to max_replicas", + }, + { + name: "No triggers defined", + keda: Keda{ + MinReplicas: 1, + MaxReplicas: 3, + }, + wantErr: true, + errMsg: "at least one trigger must be defined when autoscaler is enabled", + }, + { + name: "Valid config", + keda: Keda{ + MinReplicas: 1, + MaxReplicas: 5, + Triggers: map[string]Trigger{ + "test": {Type: KAFKA}, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.keda.Validate() + if tt.wantErr { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errMsg) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestKeda_PauseResume(t *testing.T) { + tests := []struct { + name string + replica []int + wantKeda Keda + }{ + { + name: "Pause without replica", + replica: []int{}, + wantKeda: Keda{ + Paused: true, + }, + }, + { + name: "Pause with replica", + replica: []int{3}, + wantKeda: Keda{ + PausedWithReplica: true, + PausedReplica: 3, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + k := &Keda{} + k.Pause(tt.replica...) 
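// Pause with no arguments only sets Paused; passing a replica count switches to
// PausedWithReplica/PausedReplica instead, matching the variadic behaviour of Keda.Pause above.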
+ assert.Equal(t, tt.wantKeda.Paused, k.Paused) + assert.Equal(t, tt.wantKeda.PausedWithReplica, k.PausedWithReplica) + assert.Equal(t, tt.wantKeda.PausedReplica, k.PausedReplica) + + k.Resume() + assert.False(t, k.Paused) + assert.False(t, k.PausedWithReplica) + }) + } +} diff --git a/modules/firehose/log.go b/modules/firehose/log.go deleted file mode 100644 index 921158ae..00000000 --- a/modules/firehose/log.go +++ /dev/null @@ -1,60 +0,0 @@ -package firehose - -import ( - "context" - "encoding/json" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/modules/kubernetes" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/kube" -) - -func (*firehoseModule) Log(ctx context.Context, res module.ExpandedResource, filter map[string]string) (<-chan module.LogChunk, error) { - r := res.Resource - - var conf moduleConfig - if err := json.Unmarshal(r.Spec.Configs, &conf); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid config json: %v", err) - } - - var kubeOut kubernetes.Output - if err := json.Unmarshal(res.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { - return nil, err - } - - if filter == nil { - filter = make(map[string]string) - } - - hc, err := conf.GetHelmReleaseConfig(r) - if err != nil { - return nil, err - } - - filter["app"] = hc.Name - - kubeCl := kube.NewClient(kubeOut.Configs) - logs, err := kubeCl.StreamLogs(ctx, hc.Namespace, filter) - if err != nil { - return nil, err - } - - mappedLogs := make(chan module.LogChunk) - go func() { - defer close(mappedLogs) - for { - select { - case log, ok := <-logs: - if !ok { - return - } - mappedLogs <- module.LogChunk{Data: log.Data, Labels: log.Labels} - case <-ctx.Done(): - return - } - } - }() - - return mappedLogs, err -} diff --git a/modules/firehose/module.go b/modules/firehose/module.go index 7c9a8385..7233ccd7 100644 --- a/modules/firehose/module.go +++ b/modules/firehose/module.go @@ -1,39 +1,37 @@ package firehose import ( + "context" _ "embed" "encoding/json" + "sync" + "time" - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/modules/kubernetes" + "helm.sh/helm/v3/pkg/release" + v1 "k8s.io/api/core/v1" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/helm" + "github.com/goto/entropy/pkg/kafka" + "github.com/goto/entropy/pkg/kube" + "github.com/goto/entropy/pkg/validator" + "github.com/goto/entropy/pkg/worker" ) const ( - StopAction = "stop" - StartAction = "start" + keyKubeDependency = "kube_cluster" + ScaleAction = "scale" + StartAction = "start" + StopAction = "stop" ResetAction = "reset" + ResetV2Action = "reset-v2" UpgradeAction = "upgrade" ) -const ( - releaseCreate = "release_create" - releaseUpdate = "release_update" - consumerReset = "consumer_reset" -) - -const ( - stateRunning = "RUNNING" - stateStopped = "STOPPED" -) - -const ( - ResetToDateTime = "DATETIME" - ResetToEarliest = "EARLIEST" - ResetToLatest = "LATEST" -) - -const keyKubeDependency = "kube_cluster" +var mu sync.Mutex var Module = module.Descriptor{ Kind: "firehose", @@ -43,73 +41,135 @@ var Module = module.Descriptor{ Actions: []module.ActionDesc{ { Name: module.CreateAction, - Description: "Creates firehose instance.", - ParamSchema: completeConfigSchema, + Description: 
"Creates a new firehose", }, { Name: module.UpdateAction, - Description: "Updates an existing firehose instance.", - ParamSchema: completeConfigSchema, + Description: "Update all configurations of firehose", }, { - Name: ScaleAction, - Description: "Scale-up or scale-down an existing firehose instance.", - ParamSchema: scaleActionSchema, + Name: ResetAction, + Description: "Stop firehose, reset consumer group, restart", + }, + { + Name: ResetV2Action, + Description: "Stop firehose, reset consumer group, restart with datetime option", }, { Name: StopAction, - Description: "Stop firehose and all its components.", + Description: "Stop all replicas of this firehose.", }, { Name: StartAction, - Description: "Start firehose and all its components.", + Description: "Start the firehose if it is currently stopped.", }, { - Name: ResetAction, - Description: "Reset firehose kafka consumer group to given timestamp", - ParamSchema: resetActionSchema, + Name: ScaleAction, + Description: "Scale the number of replicas to given number.", }, { Name: UpgradeAction, - Description: "Upgrade firehose to current stable version", + Description: "Upgrade firehose version", }, }, - DriverFactory: func(conf json.RawMessage) (module.Driver, error) { - fm := firehoseModuleWithDefaultConfigs() - err := json.Unmarshal(conf, fm) - if err != nil { + DriverFactory: func(confJSON json.RawMessage) (module.Driver, error) { + mu.Lock() + defer mu.Unlock() + + conf := defaultDriverConf // clone the default value + if err := json.Unmarshal(confJSON, &conf); err != nil { + return nil, err + } else if err := validator.TaggedStruct(conf); err != nil { return nil, err } - return fm, nil + + return &firehoseDriver{ + conf: conf, + timeNow: time.Now, + kubeDeploy: func(_ context.Context, isCreate bool, kubeConf kube.Config, hc helm.ReleaseConfig) error { + canUpdate := func(rel *release.Release) bool { + curLabels, ok := rel.Config[labelsConfKey].(map[string]any) + if !ok { + return false + } + newLabels, ok := hc.Values[labelsConfKey].(map[string]string) + if !ok { + return false + } + + isManagedByEntropy := curLabels[labelOrchestrator] == orchestratorLabelValue + isSameDeployment := curLabels[labelDeployment] == newLabels[labelDeployment] + + return isManagedByEntropy && isSameDeployment + } + + helmCl := helm.NewClient(&helm.Config{Kubernetes: kubeConf}) + _, errHelm := helmCl.Upsert(&hc, canUpdate) + return errHelm + }, + kubeGetPod: func(ctx context.Context, conf kube.Config, ns string, labels map[string]string) ([]kube.Pod, error) { + kubeCl, err := kube.NewClient(ctx, conf) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("failed to create new kube client on firehose driver kube get pod").WithCausef(err.Error()) + } + return kubeCl.GetPodDetails(ctx, ns, labels, func(pod v1.Pod) bool { + // allow pods that are in running state and are not marked for deletion + return pod.Status.Phase == v1.PodRunning && pod.DeletionTimestamp == nil + }) + }, + kubeGetDeployment: func(ctx context.Context, conf kube.Config, ns, name string) (kube.Deployment, error) { + kubeCl, err := kube.NewClient(ctx, conf) + if err != nil { + return kube.Deployment{}, errors.ErrInternal.WithMsgf("failed to create new kube client on firehose driver kube get deployment").WithCausef(err.Error()) + } + return kubeCl.GetDeploymentDetails(ctx, ns, name) + }, + consumerReset: consumerReset, + }, nil }, } -type firehoseModule struct { - Config config `json:"config"` -} +func consumerReset(ctx context.Context, conf Config, out kubernetes.Output, resetTo 
string, offsetResetDelaySeconds int) error { + const ( + networkErrorRetryDuration = 5 * time.Second + kubeAPIRetryBackoffDuration = 30 * time.Second + contextCancellationBackoffDuration = 30 * time.Second + ) -type config struct { - ChartRepository string `json:"chart_repository,omitempty"` - ChartName string `json:"chart_name,omitempty"` - ChartVersion string `json:"chart_version,omitempty"` - ImageRepository string `json:"image_repository,omitempty"` - ImageName string `json:"image_name,omitempty"` - ImageTag string `json:"image_tag,omitempty"` - Namespace string `json:"namespace,omitempty"` - ImagePullPolicy string `json:"image_pull_policy,omitempty"` -} + var ( + errNetwork = worker.RetryableError{RetryAfter: networkErrorRetryDuration} + errKubeAPI = worker.RetryableError{RetryAfter: kubeAPIRetryBackoffDuration} + errResetContextCancellation = worker.RetryableError{RetryAfter: contextCancellationBackoffDuration} + ) -func firehoseModuleWithDefaultConfigs() *firehoseModule { - return &firehoseModule{ - config{ - ChartRepository: "https://odpf.github.io/charts/", - ChartName: "firehose", - ChartVersion: "0.1.3", - ImageRepository: "odpf/firehose", - ImageName: "firehose", - ImageTag: "latest", - Namespace: "firehose", - ImagePullPolicy: "IfNotPresent", - }, + brokerAddr := conf.EnvVariables[confKeyKafkaBrokers] + consumerID := conf.EnvVariables[confKeyConsumerID] + + kubeClient, err := kube.NewClient(ctx, out.Configs) + if err != nil { + return err } + + select { + case <-time.After(time.Duration(offsetResetDelaySeconds) * time.Second): + if err := kafka.DoReset(ctx, kubeClient, conf.Namespace, brokerAddr, consumerID, resetTo, conf.DeploymentID); err != nil { + switch { + case errors.Is(err, kube.ErrJobCreationFailed): + return errNetwork.WithCause(err) + + case errors.Is(err, kube.ErrJobNotFound): + return errKubeAPI.WithCause(err) + + case errors.Is(err, kube.ErrJobExecutionFailed): + return errKubeAPI.WithCause(err) + + default: + return err + } + } + case <-ctx.Done(): + return errResetContextCancellation.WithCause(errors.New("context cancelled while reset")) + } + + return nil } diff --git a/modules/firehose/module_test.go b/modules/firehose/module_test.go new file mode 100644 index 00000000..9e57a670 --- /dev/null +++ b/modules/firehose/module_test.go @@ -0,0 +1,24 @@ +package firehose + +import ( + "os" + "testing" + + "encoding/json" +) + +func BenchmarkDriverFactory(b *testing.B) { + b.SetParallelism(10000) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + configFile, err := os.ReadFile("./test/module-config.json") + if err != nil { + b.Fatalf("Failed to read file: %v", err) + } + + config := json.RawMessage(configFile) + + _, _ = Module.DriverFactory(config) + } + }) +} diff --git a/modules/firehose/output.go b/modules/firehose/output.go deleted file mode 100644 index 2dcb5f37..00000000 --- a/modules/firehose/output.go +++ /dev/null @@ -1,77 +0,0 @@ -package firehose - -import ( - "context" - "encoding/json" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/modules/kubernetes" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/kube" -) - -type Output struct { - Namespace string `json:"namespace,omitempty"` - ReleaseName string `json:"release_name,omitempty"` - Pods []kube.Pod `json:"pods,omitempty"` - Defaults config `json:"defaults,omitempty"` -} - -func (out Output) JSON() []byte { - b, err := json.Marshal(out) - if err != nil { - panic(err) - } - return b -} - -func (m 
*firehoseModule) Output(ctx context.Context, res module.ExpandedResource) (json.RawMessage, error) { - var conf moduleConfig - if err := json.Unmarshal(res.Resource.Spec.Configs, &conf); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid config json: %v", err) - } - - var output Output - if err := json.Unmarshal(res.Resource.State.Output, &output); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid output json: %v", err) - } - - pods, err := m.podDetails(ctx, res) - if err != nil { - return nil, err - } - - hc, err := conf.GetHelmReleaseConfig(res.Resource) - if err != nil { - return nil, err - } - - return Output{ - Namespace: hc.Namespace, - ReleaseName: hc.Name, - Pods: pods, - Defaults: output.Defaults, - }.JSON(), nil -} - -func (*firehoseModule) podDetails(ctx context.Context, res module.ExpandedResource) ([]kube.Pod, error) { - r := res.Resource - - var conf moduleConfig - if err := json.Unmarshal(r.Spec.Configs, &conf); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid config json: %v", err) - } - - var kubeOut kubernetes.Output - if err := json.Unmarshal(res.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { - return nil, err - } - - hc, err := conf.GetHelmReleaseConfig(r) - if err != nil { - return nil, err - } - - kubeCl := kube.NewClient(kubeOut.Configs) - return kubeCl.GetPodDetails(ctx, hc.Namespace, map[string]string{"app": hc.Name}) -} diff --git a/modules/firehose/plan.go b/modules/firehose/plan.go deleted file mode 100644 index 66707f13..00000000 --- a/modules/firehose/plan.go +++ /dev/null @@ -1,160 +0,0 @@ -package firehose - -import ( - "context" - "encoding/json" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" -) - -func (m *firehoseModule) Plan(_ context.Context, res module.ExpandedResource, act module.ActionRequest) (*module.Plan, error) { - switch act.Name { - case module.CreateAction: - return m.planCreate(res, act) - case ResetAction: - return m.planReset(res, act) - default: - return m.planChange(res, act) - } -} - -func (m *firehoseModule) planCreate(res module.ExpandedResource, act module.ActionRequest) (*module.Plan, error) { - var plan module.Plan - r := res.Resource - - var reqConf moduleConfig - if err := json.Unmarshal(act.Params, &reqConf); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid config json: %v", err) - } - if err := reqConf.validateAndSanitize(res.Resource); err != nil { - return nil, err - } - - output := Output{ - Defaults: m.Config, - }.JSON() - - r.Spec.Configs = reqConf.JSON() - r.State = resource.State{ - Status: resource.StatusPending, - ModuleData: moduleData{ - PendingSteps: []string{releaseCreate}, - }.JSON(), - Output: output, - } - - plan.Resource = r - if reqConf.StopTime != nil { - plan.ScheduleRunAt = *reqConf.StopTime - } - plan.Reason = "firehose created" - return &plan, nil -} - -func (m *firehoseModule) planChange(res module.ExpandedResource, act module.ActionRequest) (*module.Plan, error) { - var plan module.Plan - r := res.Resource - - var conf moduleConfig - if err := json.Unmarshal(r.Spec.Configs, &conf); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid config json: %v", err) - } - - switch act.Name { - case module.UpdateAction: - var reqConf moduleConfig - if err := json.Unmarshal(act.Params, &reqConf); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid config json: %v", err) - } - if err := 
reqConf.validateAndSanitize(r); err != nil { - return nil, err - } - conf = reqConf - - if conf.StopTime != nil { - plan.ScheduleRunAt = *conf.StopTime - } - plan.Reason = "firehose config updated" - - case ScaleAction: - var scaleParams struct { - Replicas int `json:"replicas"` - } - if err := json.Unmarshal(act.Params, &scaleParams); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid config json: %v", err) - } - conf.Firehose.Replicas = scaleParams.Replicas - plan.Reason = "firehose scaled" - - case StartAction: - conf.State = stateRunning - plan.Reason = "firehose started" - - case StopAction: - conf.State = stateStopped - plan.Reason = "firehose stopped" - - case UpgradeAction: - var output Output - err := json.Unmarshal(res.State.Output, &output) - if err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid output json: %v", err) - } - - output.Defaults = m.Config - res.State.Output = output.JSON() - - plan.Reason = "firehose upgraded" - } - - r.Spec.Configs = conf.JSON() - r.State = resource.State{ - Status: resource.StatusPending, - Output: res.State.Output, - ModuleData: moduleData{ - PendingSteps: []string{releaseUpdate}, - }.JSON(), - } - plan.Resource = r - return &plan, nil -} - -func (*firehoseModule) planReset(res module.ExpandedResource, act module.ActionRequest) (*module.Plan, error) { - r := res.Resource - - var conf moduleConfig - if err := json.Unmarshal(r.Spec.Configs, &conf); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid config json: %v", err) - } - - var resetParams struct { - To string `json:"to"` - Datetime string `json:"datetime"` - } - if err := json.Unmarshal(act.Params, &resetParams); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid action params json: %v", err) - } - - var resetTo string - switch resetParams.To { - case "DATETIME": - resetTo = resetParams.Datetime - default: - resetTo = resetParams.To - } - - r.Spec.Configs = conf.JSON() - r.State = resource.State{ - Status: resource.StatusPending, - Output: res.State.Output, - ModuleData: moduleData{ - PendingSteps: []string{releaseUpdate, consumerReset, releaseUpdate}, - ResetTo: resetTo, - StateOverride: stateStopped, - }.JSON(), - } - - return &module.Plan{Resource: r, Reason: "firehose consumer reset"}, nil -} diff --git a/modules/firehose/plan_test.go b/modules/firehose/plan_test.go deleted file mode 100644 index 0f215ac1..00000000 --- a/modules/firehose/plan_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package firehose - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/assert" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" -) - -func TestFirehoseModule_Plan(t *testing.T) { - t.Parallel() - - res := resource.Resource{ - URN: "orn:entropy:firehose:test", - Kind: "firehose", - Name: "test", - Project: "demo", - Spec: resource.Spec{ - Configs: []byte(`{"state":"RUNNING","firehose":{"replicas":1,"kafka_broker_address":"localhost:9092","kafka_topic":"test-topic","kafka_consumer_id":"test-consumer-id","env_variables":{}}}`), - }, - State: resource.State{}, - } - - table := []struct { - title string - res module.ExpandedResource - act module.ActionRequest - want *module.Plan - wantErr error - }{ - { - title: "InvalidConfiguration", - res: module.ExpandedResource{Resource: res}, - act: module.ActionRequest{ - Name: module.CreateAction, - Params: []byte(`{`), - }, - 
wantErr: errors.ErrInvalid, - }, - { - title: "ValidConfiguration", - res: module.ExpandedResource{Resource: res}, - act: module.ActionRequest{ - Name: module.CreateAction, - Params: []byte(`{"state":"RUNNING","firehose":{"replicas":1,"kafka_broker_address":"localhost:9092","kafka_topic":"test-topic","kafka_consumer_id":"test-consumer-id","env_variables":{}}}`), - }, - want: &module.Plan{ - Resource: resource.Resource{ - URN: "orn:entropy:firehose:test", - Kind: "firehose", - Name: "test", - Project: "demo", - Spec: resource.Spec{ - Configs: []byte(`{"state":"RUNNING","stop_time":null,"telegraf":null,"firehose":{"replicas":1,"kafka_broker_address":"localhost:9092","kafka_topic":"test-topic","kafka_consumer_id":"test-consumer-id","env_variables":{}}}`), - }, - State: resource.State{ - Status: resource.StatusPending, - ModuleData: []byte(`{"pending_steps":["release_create"]}`), - Output: []byte(`{"defaults":{}}`), - }, - }, - Reason: "firehose created", - }, - }, - { - title: "InvalidActionParams", - res: module.ExpandedResource{Resource: res}, - act: module.ActionRequest{ - Name: ScaleAction, - Params: []byte(`{`), - }, - wantErr: errors.ErrInvalid, - }, - { - title: "ValidScaleRequest", - res: module.ExpandedResource{Resource: res}, - act: module.ActionRequest{ - Name: ScaleAction, - Params: []byte(`{"replicas": 5}`), - }, - want: &module.Plan{ - Resource: resource.Resource{ - URN: "orn:entropy:firehose:test", - Kind: "firehose", - Name: "test", - Project: "demo", - Spec: resource.Spec{ - Configs: []byte(`{"state":"RUNNING","stop_time":null,"telegraf":null,"firehose":{"replicas":5,"kafka_broker_address":"localhost:9092","kafka_topic":"test-topic","kafka_consumer_id":"test-consumer-id","env_variables":{}}}`), - }, - State: resource.State{ - Status: resource.StatusPending, - ModuleData: []byte(`{"pending_steps":["release_update"]}`), - }, - }, - Reason: "firehose scaled", - }, - }, - { - title: "ValidResetRequest", - res: module.ExpandedResource{Resource: res}, - act: module.ActionRequest{ - Name: ResetAction, - Params: []byte(`{"to":"DATETIME","datetime":"2022-06-22T00:00:00+00:00"}`), - }, - want: &module.Plan{ - Resource: resource.Resource{ - URN: "orn:entropy:firehose:test", - Kind: "firehose", - Name: "test", - Project: "demo", - Spec: resource.Spec{ - Configs: []byte(`{"state":"RUNNING","stop_time":null,"telegraf":null,"firehose":{"replicas":1,"kafka_broker_address":"localhost:9092","kafka_topic":"test-topic","kafka_consumer_id":"test-consumer-id","env_variables":{}}}`), - }, - State: resource.State{ - Status: resource.StatusPending, - ModuleData: []byte(`{"pending_steps":["release_update","consumer_reset","release_update"],"reset_to":"2022-06-22T00:00:00+00:00","state_override":"STOPPED"}`), - }, - }, - Reason: "firehose consumer reset", - }, - }, - { - title: "WithStopTimeConfiguration", - res: module.ExpandedResource{Resource: res}, - act: module.ActionRequest{ - Name: module.CreateAction, - Params: []byte(`{"state":"RUNNING","stop_time":"3022-07-13T00:40:14.028016Z","firehose":{"replicas":1,"kafka_broker_address":"localhost:9092","kafka_topic":"test-topic","kafka_consumer_id":"test-consumer-id","env_variables":{}}}`), - }, - want: &module.Plan{ - Resource: resource.Resource{ - URN: "orn:entropy:firehose:test", - Kind: "firehose", - Name: "test", - Project: "demo", - Spec: resource.Spec{ - Configs: 
[]byte(`{"state":"RUNNING","stop_time":"3022-07-13T00:40:14.028016Z","telegraf":null,"firehose":{"replicas":1,"kafka_broker_address":"localhost:9092","kafka_topic":"test-topic","kafka_consumer_id":"test-consumer-id","env_variables":{}}}`), - }, - State: resource.State{ - Status: resource.StatusPending, - ModuleData: []byte(`{"pending_steps":["release_create"]}`), - Output: []byte(`{"defaults":{}}`), - }, - }, - ScheduleRunAt: parseTime("3022-07-13T00:40:14.028016Z"), - Reason: "firehose created", - }, - }, - } - - for _, tt := range table { - tt := tt - t.Run(tt.title, func(t *testing.T) { - t.Parallel() - m := firehoseModule{} - - got, err := m.Plan(context.Background(), tt.res, tt.act) - if tt.wantErr != nil || err != nil { - assert.Error(t, err) - assert.True(t, errors.Is(err, tt.wantErr)) - assert.Nil(t, got) - } else { - assert.NoError(t, err) - assert.Equal(t, tt.want, got, cmp.Diff(tt.want, got)) - } - }) - } -} - -func parseTime(timeString string) time.Time { - t, err := time.Parse(time.RFC3339, timeString) - if err != nil { - panic(err) - } - return t -} diff --git a/modules/firehose/schema/config.json b/modules/firehose/schema/config.json index 9b11611c..2d844fc5 100644 --- a/modules/firehose/schema/config.json +++ b/modules/firehose/schema/config.json @@ -2,217 +2,87 @@ "$schema": "http://json-schema.org/draft-07/schema#", "$id": "http://json-schema.org/draft-07/schema#", "type": "object", + "required": [ + "replicas", + "env_variables" + ], "properties": { - "state": { - "type": "string", - "enum": [ - "RUNNING", - "STOPPED" - ], - "default": "RUNNING" - }, "stop_time": { "type": "string", "format": "date-time" }, - "firehose": { + "replicas": { + "type": "number", + "default": 1, + "minimum": 1 + }, + "deployment_id": { + "type": "string" + }, + "env_variables": { "type": "object", + "additionalProperties": true, + "required": [ + "SINK_TYPE", + "INPUT_SCHEMA_PROTO_CLASS", + "SOURCE_KAFKA_BROKERS", + "SOURCE_KAFKA_TOPIC" + ], "properties": { - "replicas": { - "type": "number", - "default": 1, - "minimum": 1 - }, - "kafka_broker_address": { + "SOURCE_KAFKA_CONSUMER_GROUP_ID": { "type": "string" }, - "kafka_topic": { + "SOURCE_KAFKA_TOPIC": { "type": "string" }, - "kafka_consumer_id": { + "SOURCE_KAFKA_BROKERS": { "type": "string" }, - "env_variables": { - "type": "object", - "properties": { - "SINK_TYPE": { - "type": "string", - "enum": [ - "LOG", - "HTTP" - ] - }, - "KAFKA_RECORD_PARSER_MODE": { - "type": "string" - }, - "INPUT_SCHEMA_PROTO_CLASS": { - "type": "string" - } - }, - "additionalProperties": { - "type": "string" - }, - "required": [ - "SINK_TYPE", - "KAFKA_RECORD_PARSER_MODE", - "INPUT_SCHEMA_PROTO_CLASS" - ], - "allOf": [ - { - "if": { - "properties": { - "SINK_TYPE": { - "const": "HTTP" - } - }, - "required": [ - "SINK_TYPE" - ] - }, - "then": { - "properties": { - "SINK_HTTP_RETRY_STATUS_CODE_RANGES": { - "type": "string" - }, - "SINK_HTTP_REQUEST_LOG_STATUS_CODE_RANGES": { - "type": "string" - }, - "SINK_HTTP_REQUEST_TIMEOUT_MS": { - "type": "number" - }, - "SINK_HTTP_REQUEST_METHOD": { - "type": "string", - "enum": [ - "put", - "post" - ] - }, - "SINK_HTTP_MAX_CONNECTIONS": { - "type": "number" - }, - "SINK_HTTP_SERVICE_URL": { - "type": "string" - }, - "SINK_HTTP_HEADERS": { - "type": "string" - }, - "SINK_HTTP_PARAMETER_SOURCE": { - "type": "string", - "enum": [ - "key", - "message", - "disabled" - ] - }, - "SINK_HTTP_DATA_FORMAT": { - "type": "string", - "enum": [ - "proto", - "json" - ] - }, - "SINK_HTTP_OAUTH2_ENABLE": { - "type": "boolean" - }, - 
"SINK_HTTP_OAUTH2_ACCESS_TOKEN_URL": { - "type": "string" - }, - "SINK_HTTP_OAUTH2_CLIENT_NAME": { - "type": "string" - }, - "SINK_HTTP_OAUTH2_CLIENT_SECRET": { - "type": "string" - }, - "SINK_HTTP_OAUTH2_SCOPE": { - "type": "string" - }, - "SINK_HTTP_JSON_BODY_TEMPLATE": { - "type": "string" - }, - "SINK_HTTP_PARAMETER_PLACEMENT": { - "type": "string", - "enum": [ - "query", - "header" - ] - }, - "SINK_HTTP_PARAMETER_SCHEMA_PROTO_CLASS": { - "type": "string" - } - }, - "required": [ - "SINK_HTTP_PARAMETER_SCHEMA_PROTO_CLASS", - "SINK_HTTP_PARAMETER_PLACEMENT", - "SINK_HTTP_JSON_BODY_TEMPLATE", - "SINK_HTTP_OAUTH2_SCOPE", - "SINK_HTTP_OAUTH2_CLIENT_SECRET", - "SINK_HTTP_OAUTH2_CLIENT_NAME", - "SINK_HTTP_OAUTH2_ACCESS_TOKEN_URL", - "SINK_HTTP_OAUTH2_ENABLE", - "SINK_HTTP_DATA_FORMAT", - "SINK_HTTP_PARAMETER_SOURCE", - "SINK_HTTP_HEADERS", - "SINK_HTTP_SERVICE_URL", - "SINK_HTTP_MAX_CONNECTIONS", - "SINK_HTTP_REQUEST_METHOD", - "SINK_HTTP_REQUEST_TIMEOUT_MS", - "SINK_HTTP_REQUEST_LOG_STATUS_CODE_RANGES", - "SINK_HTTP_RETRY_STATUS_CODE_RANGES" - ] - } - } + "SINK_TYPE": { + "type": "string", + "enum": [ + "JDBC", + "HTTP", + "HTTPV2", + "INFLUXDB", + "ELASTICSEARCH", + "GRPC", + "PROMETHEUS", + "BLOB", + "MONGODB", + "LOG", + "REDIS", + "BIGQUERY", + "BIGTABLE", + "MAXCOMPUTE" ] + }, + "KAFKA_RECORD_PARSER_MODE": { + "type": "string", + "default": "message" + }, + "INPUT_SCHEMA_PROTO_CLASS": { + "type": "string" } - }, - "required": [ - "replicas", - "env_variables", - "kafka_broker_address", - "kafka_topic", - "kafka_consumer_id" - ] + } }, - "telegraf": { + "autoscaler": { "type": "object", "properties": { "enabled": { "type": "boolean", "default": false }, - "config": { - "type": "object", - "properties": { - "output": { - "type": "object", - "properties": { - "prometheus_remote_write": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean", - "default": false - }, - "url": { - "type": "string" - }, - "version": { - "type": "string" - } - }, - "required": [ - "enabled", - "url", - "version" - ] - } - } - } - } + "type": { + "type": "string", + "enum": [ + "keda" + ] + }, + "spec": { + "type": "object" } } } - }, - "required": [ - "firehose" - ] -} \ No newline at end of file + } +} diff --git a/modules/firehose/schema/reset.json b/modules/firehose/schema/reset.json deleted file mode 100644 index 73f9c664..00000000 --- a/modules/firehose/schema/reset.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "to": { - "type": "string", - "enum": ["DATETIME", "EARLIEST", "LATEST"] - } - }, - "if": { - "properties": { - "to": { - "const": "DATETIME" - } - } - }, - "then": { - "properties": { - "datetime": { - "type": "string", - "format": "date-time" - } - } - }, - "required": [ - "to" - ] -} diff --git a/modules/firehose/schema/scale.json b/modules/firehose/schema/scale.json deleted file mode 100644 index f1ee90ff..00000000 --- a/modules/firehose/schema/scale.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "replicas": { - "type": "number", - "minimum": 1 - } - }, - "required": ["replicas"] -} \ No newline at end of file diff --git a/modules/firehose/sync.go b/modules/firehose/sync.go deleted file mode 100644 index dd2178e7..00000000 --- a/modules/firehose/sync.go +++ /dev/null @@ -1,145 +0,0 @@ -package 
firehose - -import ( - "context" - "encoding/json" - "time" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/modules/firehose/kafka" - "github.com/odpf/entropy/modules/kubernetes" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/helm" - "github.com/odpf/entropy/pkg/kube" - "github.com/odpf/entropy/pkg/worker" -) - -const ( - networkErrorRetryDuration = 5 * time.Second - kubeAPIRetryBackoffDuration = 30 * time.Second -) - -var ( - ErrNetwork = worker.RetryableError{RetryAfter: networkErrorRetryDuration} - ErrKubeAPI = worker.RetryableError{RetryAfter: kubeAPIRetryBackoffDuration} -) - -func (m *firehoseModule) Sync(ctx context.Context, res module.ExpandedResource) (*resource.State, error) { - r := res.Resource - - var data moduleData - var pendingStep string - if err := json.Unmarshal(r.State.ModuleData, &data); err != nil { - return nil, err - } - - if len(data.PendingSteps) != 0 { - pendingStep = data.PendingSteps[0] - data.PendingSteps = data.PendingSteps[1:] - } - - var conf moduleConfig - if err := json.Unmarshal(r.Spec.Configs, &conf); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid config json: %v", err) - } - - var kubeOut kubernetes.Output - if err := json.Unmarshal(res.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { - return nil, err - } - - switch pendingStep { - case releaseCreate, releaseUpdate: - if data.StateOverride != "" { - conf.State = data.StateOverride - } - if err := m.releaseSync(pendingStep == releaseCreate, conf, r, kubeOut); err != nil { - return nil, err - } - case consumerReset: - if err := m.consumerReset(ctx, - conf, - r, - data.ResetTo, - kubeOut); err != nil { - return nil, err - } - data.StateOverride = "" - default: - if err := m.releaseSync(pendingStep == releaseCreate, conf, r, kubeOut); err != nil { - return nil, err - } - } - - finalStatus := resource.StatusCompleted - if len(data.PendingSteps) > 0 { - finalStatus = resource.StatusPending - } - - output, err := m.Output(ctx, res) - if err != nil { - return nil, err - } - - return &resource.State{ - Status: finalStatus, - Output: output, - ModuleData: data.JSON(), - }, nil -} - -func (*firehoseModule) releaseSync(isCreate bool, conf moduleConfig, r resource.Resource, kube kubernetes.Output) error { - helmCl := helm.NewClient(&helm.Config{Kubernetes: kube.Configs}) - - if conf.State == stateStopped || (conf.StopTime != nil && conf.StopTime.Before(time.Now())) { - conf.Firehose.Replicas = 0 - } - - hc, err := conf.GetHelmReleaseConfig(r) - if err != nil { - return err - } - - var helmErr error - if isCreate { - _, helmErr = helmCl.Create(hc) - } else { - _, helmErr = helmCl.Update(hc) - } - - return helmErr -} - -func (*firehoseModule) consumerReset(ctx context.Context, conf moduleConfig, r resource.Resource, resetTo string, out kubernetes.Output) error { - releaseConfig, err := conf.GetHelmReleaseConfig(r) - if err != nil { - return err - } - - cgm := kafka.NewConsumerGroupManager(conf.Firehose.KafkaBrokerAddress, kube.NewClient(out.Configs), releaseConfig.Namespace) - - switch resetTo { - case ResetToEarliest: - err = cgm.ResetOffsetToEarliest(ctx, conf.Firehose.KafkaConsumerID) - case ResetToLatest: - err = cgm.ResetOffsetToLatest(ctx, conf.Firehose.KafkaConsumerID) - default: - err = cgm.ResetOffsetToDatetime(ctx, conf.Firehose.KafkaConsumerID, resetTo) - } - - return 
handleErr(err) -} - -func handleErr(err error) error { - switch { - case errors.Is(err, kube.ErrJobCreationFailed): - return ErrNetwork.WithCause(err) - case errors.Is(err, kube.ErrJobNotFound): - return ErrKubeAPI.WithCause(err) - case errors.Is(err, kube.ErrJobExecutionFailed): - return ErrKubeAPI.WithCause(err) - default: - return err - } -} diff --git a/modules/firehose/test/module-config.json b/modules/firehose/test/module-config.json new file mode 100644 index 00000000..1b0332d2 --- /dev/null +++ b/modules/firehose/test/module-config.json @@ -0,0 +1,365 @@ +{ + "SINK_REDIS_AUTH_PASSWORD": "dummy_password", + "SINK_REDIS_AUTH_USERNAME": "dummy_username", + "big_query_sink_credential": "dummy_credential", + "chart_values": { + "chart_version": "0.0.1", + "image_repository": "dummy_repository", + "image_tag": "0.0.1" + }, + "dlq_gcs_sink_credential": "dummy_credential", + "env_variables": { + "APPLICATION_THREAD_COUNT": "1", + "DLQ_BLOB_STORAGE_TYPE": "DUMMY", + "DLQ_GCS_CREDENTIAL_PATH": "dummy_path", + "DLQ_GCS_DIRECTORY_PREFIX": "dummy_prefix", + "DLQ_GCS_RETRY_DELAY_MULTIPLIER": "1", + "DLQ_GCS_RETRY_INITIAL_DELAY_MS": "100", + "DLQ_GCS_RETRY_INITIAL_RPC_TIMEOUT_MS": "500", + "DLQ_GCS_RETRY_MAX_ATTEMPTS": "5", + "DLQ_GCS_RETRY_MAX_DELAY_MS": "1000", + "DLQ_GCS_RETRY_RPC_MAX_TIMEOUT_MS": "500", + "DLQ_GCS_RETRY_RPC_TIMEOUT_MULTIPLIER": "1", + "DLQ_GCS_RETRY_TOTAL_TIMEOUT_MS": "10000", + "DLQ_KAFKA_ACKS": "dummy", + "DLQ_KAFKA_BATCH_SIZE": "1000", + "DLQ_KAFKA_BROKERS": "dummy_broker", + "DLQ_KAFKA_BUFFER_MEMORY": "1000000", + "DLQ_KAFKA_KEY_SERIALIZER": "dummy_serializer", + "DLQ_KAFKA_LINGER_MS": "1", + "DLQ_KAFKA_RETRIES": "5", + "DLQ_KAFKA_TOPIC": "dummy_topic", + "DLQ_KAFKA_VALUE_SERIALIZER": "dummy_serializer", + "DLQ_OSS_ACCESS_ID": "dummy_access_id", + "DLQ_OSS_ACCESS_KEY": "dummy_access_key", + "DLQ_OSS_BUCKET_NAME": "dummy_bucket", + "DLQ_OSS_CONNECTION_REQUEST_TIMEOUT_MS": "1000", + "DLQ_OSS_CONNECTION_TIMEOUT_MS": "1000", + "DLQ_OSS_DIRECTORY_PREFIX": "dummy_prefix", + "DLQ_OSS_ENDPOINT": "dummy_endpoint", + "DLQ_OSS_MAX_RETRY_ATTEMPTS": "5", + "DLQ_OSS_REGION": "dummy_region", + "DLQ_OSS_REQUEST_TIMEOUT_MS": "1000", + "DLQ_OSS_RETRY_ENABLED": "false", + "DLQ_OSS_SOCKET_TIMEOUT_MS": "1000", + "DLQ_RETRY_FAIL_AFTER_MAX_ATTEMPT_ENABLE": "true", + "DLQ_RETRY_MAX_ATTEMPTS": "5", + "DLQ_SINK_ENABLE": "true", + "DLQ_WRITER_TYPE": "dummy_writer", + "ERROR_TYPES_FOR_DLQ": "dummy_error", + "ERROR_TYPES_FOR_FAILING": "dummy_error", + "ERROR_TYPES_FOR_RETRY": "dummy_error", + "FILTER_DATA_SOURCE": "dummy_source", + "FILTER_ENGINE": "dummy_engine", + "FILTER_ESB_MESSAGE_FORMAT": "dummy_format", + "FILTER_JEXL_EXPRESSION": "dummy_expression", + "FILTER_JSON_SCHEMA": "dummy_schema", + "FILTER_SCHEMA_PROTO_CLASS": "dummy_class", + "INPUT_SCHEMA_DATA_TYPE": "dummy_type", + "INPUT_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE": "false", + "INPUT_SCHEMA_PROTO_TO_COLUMN_MAPPING": "dummy_mapping", + "JAVA_TOOL_OPTIONS": "dummy_options", + "LOG_LEVEL": "DEBUG", + "METRIC_STATSD_HOST": "dummy_host", + "METRIC_STATSD_PORT": "1234", + "METRIC_STATSD_TAGS": "dummy_tags", + "RETRY_EXPONENTIAL_BACKOFF_INITIAL_MS": "1", + "RETRY_EXPONENTIAL_BACKOFF_MAX_MS": "10", + "RETRY_EXPONENTIAL_BACKOFF_RATE": "1", + "RETRY_FAIL_AFTER_MAX_ATTEMPTS_ENABLE": "true", + "RETRY_MAX_ATTEMPTS": "5", + "SCHEMA_REGISTRY_STENCIL_CACHE_AUTO_REFRESH": "false", + "SCHEMA_REGISTRY_STENCIL_CACHE_TTL_MS": "1000", + "SCHEMA_REGISTRY_STENCIL_ENABLE": "false", + "SCHEMA_REGISTRY_STENCIL_FETCH_BACKOFF_MIN_MS": "100", + 
"SCHEMA_REGISTRY_STENCIL_FETCH_HEADERS": "dummy_headers", + "SCHEMA_REGISTRY_STENCIL_FETCH_RETRIES": "1", + "SCHEMA_REGISTRY_STENCIL_FETCH_TIMEOUT_MS": "100", + "SCHEMA_REGISTRY_STENCIL_REFRESH_STRATEGY": "dummy_strategy", + "SINK_BIGQUERY_ADD_EVENT_TIMESTAMP_ENABLE": "true", + "SINK_BIGQUERY_CLIENT_CONNECT_TIMEOUT_MS": "100", + "SINK_BIGQUERY_CLIENT_READ_TIMEOUT_MS": "100", + "SINK_BIGQUERY_DATASET_LABELS": "dummy_labels", + "SINK_BIGQUERY_DATASET_LOCATION": "dummy_location", + "SINK_BIGQUERY_DATASET_NAME": "dummy_dataset", + "SINK_BIGQUERY_DEFAULT_COLUMNS": "dummy_columns", + "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE": "true", + "SINK_BIGQUERY_DYNAMIC_SCHEMA_ENABLE": "false", + "SINK_BIGQUERY_METADATA_NAMESPACE": "dummy_namespace", + "SINK_BIGQUERY_ROW_INSERT_ID_ENABLE": "false", + "SINK_BIGQUERY_STORAGE_API_ENABLE": "false", + "SINK_BIGQUERY_TABLE_CLUSTERING_ENABLE": "true", + "SINK_BIGQUERY_TABLE_CLUSTERING_KEYS": "dummy_keys", + "SINK_BIGQUERY_TABLE_LABELS": "dummy_labels", + "SINK_BIGQUERY_TABLE_NAME": "dummy_table", + "SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE": "true", + "SINK_BIGQUERY_TABLE_PARTITION_EXPIRY_MS": "1000", + "SINK_BIGQUERY_TABLE_PARTITION_KEY": "dummy_key", + "SINK_BIGTABLE_COLUMN_FAMILY_MAPPING": "dummy_mapping", + "SINK_BIGTABLE_INSTANCE_ID": "dummy_instance", + "SINK_BIGTABLE_ROW_KEY_TEMPLATE": "dummy_template", + "SINK_BIGTABLE_TABLE_ID": "dummy_table", + "SINK_BLOB_FILE_PARTITION_PROTO_TIMESTAMP_FIELD_NAME": "dummy_field", + "SINK_BLOB_FILE_PARTITION_PROTO_TIMESTAMP_TIMEZONE": "dummy_timezone", + "SINK_BLOB_FILE_PARTITION_TIME_DATE_PREFIX": "dummy_prefix", + "SINK_BLOB_FILE_PARTITION_TIME_GRANULARITY_TYPE": "dummy_granularity", + "SINK_BLOB_FILE_PARTITION_TIME_HOUR_PREFIX": "dummy_hour_prefix", + "SINK_BLOB_GCS_BUCKET_NAME": "dummy_bucket", + "SINK_BLOB_GCS_RETRY_DELAY_MULTIPLIER": "1", + "SINK_BLOB_GCS_RETRY_INITIAL_DELAY_MS": "100", + "SINK_BLOB_GCS_RETRY_INITIAL_RPC_TIMEOUT_MS": "500", + "SINK_BLOB_GCS_RETRY_MAX_ATTEMPTS": "5", + "SINK_BLOB_GCS_RETRY_MAX_DELAY_MS": "1000", + "SINK_BLOB_GCS_RETRY_RPC_MAX_TIMEOUT_MS": "500", + "SINK_BLOB_GCS_RETRY_RPC_TIMEOUT_MULTIPLIER": "1", + "SINK_BLOB_GCS_RETRY_TOTAL_TIMEOUT_MS": "10000", + "SINK_BLOB_LOCAL_DIRECTORY": "dummy_directory", + "SINK_BLOB_LOCAL_FILE_ROTATION_DURATION_MS": "1000", + "SINK_BLOB_LOCAL_FILE_ROTATION_MAX_SIZE_BYTES": "1000", + "SINK_BLOB_LOCAL_FILE_WRITER_PARQUET_BLOCK_SIZE": "1000", + "SINK_BLOB_LOCAL_FILE_WRITER_PARQUET_PAGE_SIZE": "1000", + "SINK_BLOB_LOCAL_FILE_WRITER_TYPE": "dummy_type", + "SINK_BLOB_OSS_ACCESS_ID": "dummy_access_id", + "SINK_BLOB_OSS_ACCESS_KEY": "dummy_access_key", + "SINK_BLOB_OSS_BUCKET_NAME": "dummy_bucket", + "SINK_BLOB_OSS_CONNECTION_REQUEST_TIMEOUT_MS": "1000", + "SINK_BLOB_OSS_CONNECTION_TIMEOUT_MS": "1000", + "SINK_BLOB_OSS_ENDPOINT": "dummy_endpoint", + "SINK_BLOB_OSS_MAX_RETRY_ATTEMPTS": "5", + "SINK_BLOB_OSS_REGION": "dummy_region", + "SINK_BLOB_OSS_REQUEST_TIMEOUT_MS": "1000", + "SINK_BLOB_OSS_RETRY_ENABLED": "false", + "SINK_BLOB_OSS_SOCKET_TIMEOUT_MS": "1000", + "SINK_BLOB_OUTPUT_INCLUDE_KAFKA_METADATA_ENABLE": "false", + "SINK_BLOB_OUTPUT_KAFKA_METADATA_COLUMN_NAME": "dummy_column", + "SINK_BLOB_STORAGE_TYPE": "dummy_storage", + "SINK_ES_CONNECTION_URLS": "dummy_urls", + "SINK_ES_ID_FIELD": "dummy_field", + "SINK_ES_INDEX_NAME": "dummy_index", + "SINK_ES_INPUT_MESSAGE_TYPE": "dummy_type", + "SINK_ES_MODE_UPDATE_ONLY_ENABLE": "true", + "SINK_ES_PRESERVE_PROTO_FIELD_NAMES_ENABLE": "true", + "SINK_ES_REQUEST_TIMEOUT_MS": "1000", + 
"SINK_ES_RETRY_STATUS_CODE_BLACKLIST": "dummy_blacklist", + "SINK_ES_ROUTING_KEY_NAME": "dummy_key", + "SINK_ES_SHARDS_ACTIVE_WAIT_COUNT": "1", + "SINK_ES_TYPE_NAME": "dummy_type", + "SINK_GRPC_METADATA": "dummy_metadata", + "SINK_GRPC_METHOD_URL": "dummy_method_url", + "SINK_GRPC_RESPONSE_SCHEMA_PROTO_CLASS": "dummy_proto_class", + "SINK_GRPC_ROOT_CA": "dummy_root_ca", + "SINK_GRPC_SERVICE_HOST": "dummy_service_host", + "SINK_GRPC_SERVICE_PORT": "1234", + "SINK_GRPC_TLS_ENABLE": "true", + "SINK_HTTPV2_DEFAULT_FIELD_VALUE_ENABLE": "false", + "SINK_HTTPV2_DELETE_BODY_ENABLE": "true", + "SINK_HTTPV2_HEADERS": "Authorization:dummy_token,Accept:application/json", + "SINK_HTTPV2_HEADERS_PARAMETER_SOURCE": "dummy_source", + "SINK_HTTPV2_HEADERS_TEMPLATE": "dummy_template", + "SINK_HTTPV2_JSON_BODY_TEMPLATE": "dummy_json_template", + "SINK_HTTPV2_MAX_CONNECTIONS": "5", + "SINK_HTTPV2_OAUTH2_ACCESS_TOKEN_URL": "https://dummy.url", + "SINK_HTTPV2_OAUTH2_CLIENT_NAME": "dummy_client_name", + "SINK_HTTPV2_OAUTH2_CLIENT_SECRET": "dummy_client_secret", + "SINK_HTTPV2_OAUTH2_ENABLE": "true", + "SINK_HTTPV2_OAUTH2_SCOPE": "dummy_scope", + "SINK_HTTPV2_QUERY_PARAMETER_SOURCE": "dummy_query_source", + "SINK_HTTPV2_QUERY_TEMPLATE": "dummy_query_template", + "SINK_HTTPV2_REQUEST_BODY_MODE": "dummy_mode", + "SINK_HTTPV2_REQUEST_LOG_STATUS_CODE_RANGES": "200-300", + "SINK_HTTPV2_REQUEST_METHOD": "post", + "SINK_HTTPV2_REQUEST_MODE": "batch", + "SINK_HTTPV2_REQUEST_TIMEOUT_MS": "5000", + "SINK_HTTPV2_RETRY_STATUS_CODE_RANGES": "500-600", + "SINK_HTTPV2_SERVICE_URL": "https://dummy.service.url", + "SINK_HTTP_DATA_FORMAT": "json", + "SINK_HTTP_DELETE_BODY_ENABLE": "true", + "SINK_HTTP_HEADERS": "Authorization:dummy_token,Accept:application/json", + "SINK_HTTP_JSON_BODY_TEMPLATE": "dummy_body_template", + "SINK_HTTP_MAX_CONNECTIONS": "5", + "SINK_HTTP_OAUTH2_ACCESS_TOKEN_URL": "https://dummy.oauth.url", + "SINK_HTTP_OAUTH2_CLIENT_NAME": "dummy_client", + "SINK_HTTP_OAUTH2_CLIENT_SECRET": "dummy_secret", + "SINK_HTTP_OAUTH2_ENABLE": "true", + "SINK_HTTP_OAUTH2_SCOPE": "dummy_scope", + "SINK_HTTP_PARAMETER_PLACEMENT": "query", + "SINK_HTTP_PARAMETER_SCHEMA_PROTO_CLASS": "dummy_proto_class", + "SINK_HTTP_PARAMETER_SOURCE": "enabled", + "SINK_HTTP_REQUEST_LOG_STATUS_CODE_RANGES": "200-400", + "SINK_HTTP_REQUEST_METHOD": "get", + "SINK_HTTP_REQUEST_TIMEOUT_MS": "3000", + "SINK_HTTP_RETRY_STATUS_CODE_RANGES": "500-600", + "SINK_HTTP_SERVICE_URL": "https://dummy.service.url", + "SINK_HTTP_SIMPLE_DATE_FORMAT_ENABLE": "true", + "SINK_INFLUX_DB_NAME": "dummy_db", + "SINK_INFLUX_FIELD_NAME_PROTO_INDEX_MAPPING": "dummy_mapping", + "SINK_INFLUX_MEASUREMENT_NAME": "dummy_measurement", + "SINK_INFLUX_PASSWORD": "dummy_password", + "SINK_INFLUX_PROTO_EVENT_TIMESTAMP_INDEX": "dummy_index", + "SINK_INFLUX_RETENTION_POLICY": "dummy_policy", + "SINK_INFLUX_TAG_NAME_PROTO_INDEX_MAPPING": "dummy_mapping", + "SINK_INFLUX_URL": "https://dummy.influx.url", + "SINK_INFLUX_USERNAME": "dummy_user", + "SINK_JDBC_CONNECTION_POOL_IDLE_TIMEOUT_MS": "10000", + "SINK_JDBC_CONNECTION_POOL_MAX_SIZE": "5", + "SINK_JDBC_CONNECTION_POOL_MIN_IDLE": "1", + "SINK_JDBC_CONNECTION_POOL_TIMEOUT_MS": "1000", + "SINK_JDBC_PASSWORD": "dummy_password", + "SINK_JDBC_TABLE_NAME": "dummy_table", + "SINK_JDBC_UNIQUE_KEYS": "dummy_keys", + "SINK_JDBC_URL": "jdbc:dummy:url", + "SINK_JDBC_USERNAME": "dummy_user", + "SINK_MAXCOMPUTE_ACCESS_ID": "dummy_access_id", + "SINK_MAXCOMPUTE_ACCESS_KEY": "dummy_access_key", + "SINK_MAXCOMPUTE_ADD_METADATA_ENABLED": "false", + 
"SINK_MAXCOMPUTE_DDL_RETRY_BACKOFF_MILLIS": "500", + "SINK_MAXCOMPUTE_MAX_DDL_RETRY_COUNT": "5", + "SINK_MAXCOMPUTE_MAX_FUTURE_EVENT_TIME_DIFFERENCE_YEAR": "2", + "SINK_MAXCOMPUTE_MAX_PAST_EVENT_TIME_DIFFERENCE_YEAR": "3", + "SINK_MAXCOMPUTE_METADATA_COLUMNS_TYPES": "dummy_columns", + "SINK_MAXCOMPUTE_METADATA_NAMESPACE": "dummy_namespace", + "SINK_MAXCOMPUTE_ODPS_GLOBAL_SETTINGS": "dummy_settings", + "SINK_MAXCOMPUTE_ODPS_URL": "https://dummy.odps.url", + "SINK_MAXCOMPUTE_PROJECT_ID": "dummy_project", + "SINK_MAXCOMPUTE_PROTO_DOUBLE_TYPE_TO_DECIMAL_ENABLED": "true", + "SINK_MAXCOMPUTE_PROTO_DOUBLE_TYPE_TO_DECIMAL_PRECISION": "10", + "SINK_MAXCOMPUTE_PROTO_DOUBLE_TYPE_TO_DECIMAL_SCALE": "2", + "SINK_MAXCOMPUTE_PROTO_FLOAT_TYPE_TO_DECIMAL_ENABLED": "true", + "SINK_MAXCOMPUTE_PROTO_FLOAT_TYPE_TO_DECIMAL_PRECISION": "10", + "SINK_MAXCOMPUTE_PROTO_FLOAT_TYPE_TO_DECIMAL_SCALE": "2", + "SINK_MAXCOMPUTE_PROTO_FLOAT_TYPE_TO_DOUBLE_ENABLED": "true", + "SINK_MAXCOMPUTE_PROTO_INTEGER_TYPES_TO_BIGINT_ENABLED": "true", + "SINK_MAXCOMPUTE_RECORD_PACK_FLUSH_TIMEOUT_MS": "10000", + "SINK_MAXCOMPUTE_STREAMING_INSERT_COMPRESSION_ALGORITHM": "dummy_algorithm", + "SINK_MAXCOMPUTE_STREAMING_INSERT_COMPRESSION_ENABLED": "true", + "SINK_MAXCOMPUTE_STREAMING_INSERT_COMPRESSION_LEVEL": "5", + "SINK_MAXCOMPUTE_STREAMING_INSERT_COMPRESSION_STRATEGY": "1", + "SINK_MAXCOMPUTE_STREAMING_INSERT_MAXIMUM_SESSION_COUNT": "3", + "SINK_MAXCOMPUTE_TABLE_PARTITIONING_ENABLE": "true", + "SINK_MAXCOMPUTE_TABLE_PARTITION_BY_TIMESTAMP_TIME_UNIT": "HOUR", + "SINK_MAXCOMPUTE_TABLE_PARTITION_COLUMN_NAME": "dummy_partition", + "SINK_MAXCOMPUTE_TABLE_PROPERTIES": "dummy_properties", + "SINK_MAXCOMPUTE_TABLE_VALIDATOR_MAX_COLUMNS_PER_TABLE": "100", + "SINK_MAXCOMPUTE_TABLE_VALIDATOR_MAX_PARTITION_KEYS_PER_TABLE": "2", + "SINK_MAXCOMPUTE_TABLE_VALIDATOR_NAME_REGEX": "dummy_regex", + "SINK_MAXCOMPUTE_TIME_UNIT_TYPE": "dummy_unit", + "SINK_MAXCOMPUTE_VALID_MAX_TIMESTAMP": "2099-12-31T23:59:59", + "SINK_MAXCOMPUTE_VALID_MIN_TIMESTAMP": "2000-01-01T00:00:00", + "SINK_MAXCOMPUTE_ZONE_ID": "dummy_zone", + "SINK_POOL_NUM_THREADS": "2", + "SINK_POOL_QUEUE_POLL_TIMEOUT_MS": "500", + "SINK_PROM_HEADERS": "dummy_headers", + "SINK_PROM_LABEL_NAME_PROTO_INDEX_MAPPING": "dummy_mapping", + "SINK_PROM_METRIC_NAME_PROTO_INDEX_MAPPING": "dummy_mapping", + "SINK_PROM_PROTO_EVENT_TIMESTAMP_INDEX": "dummy_index", + "SINK_PROM_REQUEST_LOG_STATUS_CODE_RANGES": "200-400", + "SINK_PROM_REQUEST_TIMEOUT_MS": "5000", + "SINK_PROM_RETRY_STATUS_CODE_RANGES": "500-600", + "SINK_PROM_SERVICE_URL": "https://dummy.prom.url", + "SINK_PROM_WITH_EVENT_TIMESTAMP": "true", + "SINK_REDIS_CONNECTION_MAX_RETRIES": "3", + "SINK_REDIS_CONNECTION_RETRY_BACKOFF_MS": "1000", + "SINK_REDIS_CONNECTION_TIMEOUT_MS": "2000", + "SINK_REDIS_DATA_TYPE": "dummy_type", + "SINK_REDIS_DEPLOYMENT_TYPE": "cluster", + "SINK_REDIS_HASHSET_FIELD_TO_COLUMN_MAPPING": "{\"dummy_field\":\"dummy_column\"}", + "SINK_REDIS_KEY_TEMPLATE": "dummy_key_template", + "SINK_REDIS_KEY_VALUE_DATA_FIELD_NAME": "dummy_field", + "SINK_REDIS_KEY_VALUE_DATA_PROTO_INDEX": "dummy_index", + "SINK_REDIS_LIST_DATA_FIELD_NAME": "dummy_list_field", + "SINK_REDIS_LIST_DATA_PROTO_INDEX": "dummy_list_index", + "SINK_REDIS_SOCKET_TIMEOUT_MS": "5000", + "SINK_REDIS_TTL_TYPE": "EXPIRE", + "SINK_REDIS_TTL_VALUE": "3600", + "SINK_REDIS_URLS": "redis://dummy.redis.url", + "SOURCE_KAFKA_ASYNC_COMMIT_ENABLE": "false", + "SOURCE_KAFKA_COMMIT_ONLY_CURRENT_PARTITIONS_ENABLE": "false", + "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_COMMIT_ENABLE": 
"true", + "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_OFFSET_RESET": "earliest", + "SOURCE_KAFKA_CONSUMER_CONFIG_FETCH_MIN_BYTES": "10", + "SOURCE_KAFKA_CONSUMER_CONFIG_MANUAL_COMMIT_MIN_INTERVAL_MS": "1000", + "SOURCE_KAFKA_CONSUMER_CONFIG_MAX_POLL_RECORDS": "100", + "SOURCE_KAFKA_CONSUMER_CONFIG_METADATA_MAX_AGE_MS": "1000", + "SOURCE_KAFKA_CONSUMER_CONFIG_PARTITION_ASSIGNMENT_STRATEGY": "dummy_strategy", + "SOURCE_KAFKA_CONSUMER_CONFIG_SESSION_TIMEOUT_MS": "5000", + "SOURCE_KAFKA_CONSUMER_MODE": "async", + "SOURCE_KAFKA_POLL_TIMEOUT_MS": "10000", + "_JAVA_OPTIONS": "-Xmx512m -Xms512m" + }, + "gcs_sink_credential": "dummy_credential", + "init_container": { + "args": [ + "wget -O /work-dir/dummy-file1.jar http://dummy-url.com/dummy-path/dummy-file1.jar;wget -O /work-dir/dummy-file2.jar http://dummy-url.com/dummy-path/dummy-file2.jar;wget -O /work-dir/dummy-file3.jar http://dummy-url.com/dummy-path/dummy-file3.jar" + ], + "command": [ + "/bin/sh", + "-c" + ], + "enabled": true, + "image_tag": "dummy_tag", + "pull_policy": "IfNotPresent", + "repository": "dummy_repository" + }, + "kube_deploy_timeout_seconds": 60, + "labels": { + "app-id": "dummy-app-id", + "application": "dummy_application", + "component": "dummy_component", + "environment": "dummy_environment", + "instance-id": "dummy_instance_id", + "landscape": "dummy_landscape", + "lifecycle": "dummy_lifecycle", + "odin_deployment": "dummy_odin_deployment", + "org": "dummy_org", + "owner": "dummy_owner", + "platform_team_id": "dummy_platform_team_id", + "product-group-id": "dummy_product_group_id", + "product-group-name": "dummy_product_group_name", + "projectID": "dummy_project_id", + "team-id": "dummy_team_id", + "team-name": "dummy_team_name", + "tenant": "dummy_tenant" + }, + "namespace": { + "default": "dummy_namespace" + }, + "offset_reset_delay_seconds": 10, + "requests_and_limits": { + "default": { + "limits": { + "cpu": "dummy_cpu_limit", + "memory": "dummy_memory_limit" + }, + "requests": { + "cpu": "dummy_cpu_request", + "memory": "dummy_memory_request" + } + } + }, + "telegraf": { + "config": { + "additional_global_tags": { + "app": "dummy_app", + "consumer_group": "dummy_consumer_group", + "deployment": "dummy_deployment", + "environment": "dummy_environment", + "organization": "dummy_organization", + "projectID": "dummy_project_id", + "sink": "dummy_sink", + "team": "dummy_team" + }, + "output": { + "prometheus_remote_write": { + "enabled": true, + "url": "http://dummy-url.com/dummy-path" + } + } + }, + "enabled": true, + "image": { + "pullPolicy": "IfNotPresent", + "repository": "dummy_repository", + "tag": "dummy_tag" + } + } +} \ No newline at end of file diff --git a/modules/flink/config.go b/modules/flink/config.go new file mode 100644 index 00000000..58fe5fdb --- /dev/null +++ b/modules/flink/config.go @@ -0,0 +1,64 @@ +package flink + +import ( + _ "embed" + "encoding/json" + + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/validator" +) + +var ( + //go:embed schema/config.json + configSchemaRaw []byte + + validateConfig = validator.FromJSONSchema(configSchemaRaw) +) + +type Influx struct { + URL string `json:"url,omitempty"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + DatabaseName string `json:"database_name,omitempty"` +} + +type Config struct { + KubeNamespace string `json:"kube_namespace,omitempty"` + Influx Influx `json:"influx,omitempty"` + SinkKafkaStream string `json:"sink_kafka_stream,omitempty"` + PrometheusURL 
string `json:"prometheus_url,omitempty"` + FlinkName string `json:"flink_name,omitempty"` + ExtraStreams []string `json:"extra_streams,omitempty"` +} + +func readConfig(confJSON json.RawMessage, dc driverConf) (*Config, error) { + var cfg Config + if err := json.Unmarshal(confJSON, &cfg); err != nil { + return nil, errors.ErrInvalid.WithMsgf("invalid config json").WithCausef(err.Error()) + } + + if cfg.Influx.URL == "" { + cfg.Influx.URL = dc.Influx.URL + cfg.Influx.Username = dc.Influx.Username + cfg.Influx.Password = dc.Influx.Password + cfg.Influx.DatabaseName = dc.Influx.DatabaseName + } + + if cfg.SinkKafkaStream == "" { + cfg.SinkKafkaStream = dc.SinkKafkaStream + } + + if cfg.KubeNamespace == "" { + cfg.KubeNamespace = dc.KubeNamespace + } + + if cfg.PrometheusURL == "" { + cfg.PrometheusURL = dc.PrometheusURL + } + + if cfg.FlinkName == "" { + cfg.FlinkName = dc.FlinkName + } + + return &cfg, nil +} diff --git a/modules/flink/driver.go b/modules/flink/driver.go new file mode 100644 index 00000000..9f20a2c6 --- /dev/null +++ b/modules/flink/driver.go @@ -0,0 +1,42 @@ +package flink + +import ( + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" +) + +type flinkDriver struct { + conf driverConf +} + +type driverConf struct { + Influx Influx `json:"influx,omitempty"` + SinkKafkaStream string `json:"sink_kafka_stream,omitempty"` + KubeNamespace string `json:"kube_namespace,omitempty"` + PrometheusURL string `json:"prometheus_url,omitempty"` + FlinkName string `json:"flink_name,omitempty"` +} + +type Output struct { + KubeCluster kubernetes.Output `json:"kube_cluster,omitempty"` + KubeNamespace string `json:"kube_namespace,omitempty"` + Influx Influx `json:"influx,omitempty"` + SinkKafkaStream string `json:"sink_kafka_stream,omitempty"` + PrometheusURL string `json:"prometheus_url,omitempty"` + FlinkName string `json:"flink_name,omitempty"` + ExtraStreams []string `json:"extra_streams,omitempty"` +} + +func readOutputData(exr module.ExpandedResource) (*Output, error) { + var curOut Output + if len(exr.Resource.State.Output) == 0 { + return &curOut, nil + } + if err := json.Unmarshal(exr.Resource.State.Output, &curOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("corrupted output").WithCausef(err.Error()) + } + return &curOut, nil +} diff --git a/modules/flink/driver_output.go b/modules/flink/driver_output.go new file mode 100644 index 00000000..171823d9 --- /dev/null +++ b/modules/flink/driver_output.go @@ -0,0 +1,41 @@ +package flink + +import ( + "context" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" +) + +func (fd *flinkDriver) Output(ctx context.Context, exr module.ExpandedResource) (json.RawMessage, error) { + output, err := readOutputData(exr) + if err != nil { + return nil, err + } + + conf, err := readConfig(exr.Resource.Spec.Configs, fd.conf) + if err != nil { + if errors.Is(err, errors.ErrInvalid) { + return nil, err + } + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + var kubeOut kubernetes.Output + if err := json.Unmarshal(exr.Dependencies[keyKubeDependency].Output, &kubeOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid kube state").WithCausef(err.Error()) + } + + output.KubeCluster = kubeOut + 
output.Influx = conf.Influx + output.KubeNamespace = conf.KubeNamespace + output.SinkKafkaStream = conf.SinkKafkaStream + output.PrometheusURL = conf.PrometheusURL + output.FlinkName = conf.FlinkName + output.ExtraStreams = conf.ExtraStreams + + return modules.MustJSON(output), nil +} diff --git a/modules/flink/driver_plan.go b/modules/flink/driver_plan.go new file mode 100644 index 00000000..0c6c3c2d --- /dev/null +++ b/modules/flink/driver_plan.go @@ -0,0 +1,35 @@ +package flink + +import ( + "context" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" +) + +func (fd *flinkDriver) Plan(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + res.Resource.Spec = resource.Spec{ + Configs: act.Params, + Dependencies: res.Spec.Dependencies, + } + + conf, err := readConfig(res.Resource.Spec.Configs, fd.conf) + if err != nil { + return nil, err + } + + res.Resource.Spec.Configs = modules.MustJSON(conf) + + output, err := fd.Output(ctx, res) + if err != nil { + return nil, err + } + + res.Resource.State = resource.State{ + Status: resource.StatusCompleted, + Output: output, + } + + return &res.Resource, nil +} diff --git a/modules/flink/driver_sync.go b/modules/flink/driver_sync.go new file mode 100644 index 00000000..f32c891c --- /dev/null +++ b/modules/flink/driver_sync.go @@ -0,0 +1,16 @@ +package flink + +import ( + "context" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" +) + +func (*flinkDriver) Sync(_ context.Context, res module.ExpandedResource) (*resource.State, error) { + return &resource.State{ + Status: resource.StatusCompleted, + Output: res.Resource.State.Output, + ModuleData: nil, + }, nil +} diff --git a/modules/flink/module.go b/modules/flink/module.go new file mode 100644 index 00000000..4a319d22 --- /dev/null +++ b/modules/flink/module.go @@ -0,0 +1,33 @@ +package flink + +import ( + _ "embed" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/pkg/errors" +) + +const ( + keyKubeDependency = "kube_cluster" +) + +var Module = module.Descriptor{ + Kind: "flink", + Actions: []module.ActionDesc{ + { + Name: module.CreateAction, + }, + { + Name: module.UpdateAction, + }, + }, + DriverFactory: func(conf json.RawMessage) (module.Driver, error) { + fd := &flinkDriver{} + err := json.Unmarshal(conf, &fd) + if err != nil { + return nil, errors.ErrInvalid.WithMsgf("failed to unmarshal module config: %v", err) + } + return fd, nil + }, +} diff --git a/modules/flink/schema/config.json b/modules/flink/schema/config.json new file mode 100644 index 00000000..a78df504 --- /dev/null +++ b/modules/flink/schema/config.json @@ -0,0 +1,27 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "kube_namespace": { + "type": "string" + }, + "influx": { + "type": "object", + "properties": { + "url": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + } + } + }, + "sink_kafka_stream": { + "type": "string" + } + } +} \ No newline at end of file diff --git a/modules/job/config/config.go b/modules/job/config/config.go new file mode 100644 index 00000000..c4b3b4e7 --- /dev/null +++ b/modules/job/config/config.go @@ -0,0 +1,124 @@ +package config + +import ( + _ "embed" + 
"encoding/json" + "fmt" + + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/validator" +) + +const ( + maxJobNameLength = 53 + Default = "default" +) + +var ( + //go:embed schema/config.json + configSchemaRaw []byte + validateConfig = validator.FromJSONSchema(configSchemaRaw) +) + +type DriverConf struct { + Namespace string `json:"namespace"` // maybe we shouldn't restrict namespace? + RequestsAndLimits map[string]RequestsAndLimits `json:"requestsAndLimits"` // to use when not provided + EnvVariables map[string]string `json:"env_variables"` +} + +type RequestsAndLimits struct { + Limits UsageSpec `json:"limits,omitempty"` + Requests UsageSpec `json:"requests,omitempty"` +} +type UsageSpec struct { + CPU string `json:"cpu,omitempty" validate:"required"` + Memory string `json:"memory,omitempty" validate:"required"` +} + +type Config struct { + Replicas int32 `json:"replicas"` + Namespace string `json:"namespace"` + Name string `json:"name,omitempty"` + Containers []Container `json:"containers,omitempty"` + JobLabels map[string]string `json:"job_labels,omitempty"` + Volumes []Volume `json:"volumes,omitempty"` + TTLSeconds *int32 `json:"ttl_seconds,omitempty"` +} + +type Volume struct { + Name string + Kind string +} + +type Container struct { + Name string `json:"name"` + Image string `json:"image"` + ImagePullPolicy string `json:"image_pull_policy,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + SecretsVolumes []Secret `json:"secrets_volumes,omitempty"` + ConfigMapsVolumes []ConfigMap `json:"config_maps_volumes,omitempty"` + Limits UsageSpec `json:"limits,omitempty"` + Requests UsageSpec `json:"requests,omitempty"` + EnvConfigMaps []string `json:"env_config_maps,omitempty"` + EnvVariables map[string]string `json:"env_variables,omitempty"` + PreStopCmd []string `json:"pre_stop_cmd,omitempty"` + PostStartCmd []string `json:"post_start_cmd,omitempty"` +} + +type Secret struct { + Name string `json:"name"` + Mount string `json:"mount"` +} + +type ConfigMap struct { + Name string `json:"name"` + Mount string `json:"mount"` +} + +func (dc DriverConf) getDefaultResources() RequestsAndLimits { + return dc.RequestsAndLimits[Default] +} + +func ReadConfig(r resource.Resource, confJSON json.RawMessage, dc DriverConf) (*Config, error) { + var cfg Config + if err := json.Unmarshal(confJSON, &cfg); err != nil { + return nil, errors.ErrInvalid.WithMsgf("invalid config json").WithCausef(err.Error()) + } + // for each container + rl := dc.getDefaultResources() + for i := range cfg.Containers { + c := &cfg.Containers[i] + c.EnvVariables = modules.CloneAndMergeMaps(dc.EnvVariables, c.EnvVariables) + if c.Requests.CPU == "" { + c.Requests.CPU = rl.Requests.CPU + } + if c.Requests.Memory == "" { + c.Requests.Memory = rl.Requests.Memory + } + if c.Limits.CPU == "" { + c.Limits.CPU = rl.Limits.CPU + } + if c.Limits.Memory == "" { + c.Limits.Memory = rl.Limits.Memory + } + } + if err := validateConfig(confJSON); err != nil { + return nil, err + } + + if len(cfg.Name) == 0 { + cfg.Name = modules.SafeName(fmt.Sprintf("%s-%s", r.Project, r.Name), "-job", maxJobNameLength) + } else if len(cfg.Name) > maxJobNameLength { + return nil, errors.ErrInvalid.WithMsgf("Job name must not have more than %d chars", maxJobNameLength) + } + if len(cfg.Namespace) == 0 { + cfg.Namespace = dc.Namespace + } + if cfg.Replicas < 1 { + 
cfg.Replicas = 1 + } + return &cfg, nil +} diff --git a/modules/job/config/schema/config.json b/modules/job/config/schema/config.json new file mode 100644 index 00000000..85b3437f --- /dev/null +++ b/modules/job/config/schema/config.json @@ -0,0 +1,31 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": [ + "namespace" + ], + "properties": { + "replicas": { + "type": "number", + "default": 1, + "minimum": 0 + }, + "deployment_id": { + "type": "string" + }, + "containers": { + "type": "array", + "properties": { + "env_variables": { + "type": "object", + "additionalProperties": true, + "required": [ + ], + "properties": { + } + } + } + } + } +} diff --git a/modules/job/driver/driver.go b/modules/job/driver/driver.go new file mode 100644 index 00000000..8c76f0ec --- /dev/null +++ b/modules/job/driver/driver.go @@ -0,0 +1,128 @@ +package driver + +import ( + "context" + "encoding/json" + "time" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/job/config" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" + "github.com/goto/entropy/pkg/kube/job" +) + +type Driver struct { + Conf config.DriverConf + CreateJob func(ctx context.Context, conf kube.Config, j *job.Job) error + SuspendJob func(ctx context.Context, conf kube.Config, j *job.Job) error + DeleteJob func(ctx context.Context, conf kube.Config, j *job.Job) error + StartJob func(ctx context.Context, conf kube.Config, j *job.Job) error + GetJobPods func(ctx context.Context, conf kube.Config, j *job.Job, labels map[string]string) ([]kube.Pod, error) + StreamLogs func(ctx context.Context, kubeConf kube.Config, j *job.Job, filter map[string]string) (<-chan module.LogChunk, error) +} + +func (driver *Driver) Plan(_ context.Context, res module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + switch act.Name { + case module.CreateAction: + return driver.planCreate(res, act) + case SuspendAction: + return driver.planSuspend(res) + case module.DeleteAction: + return driver.planDelete(res) + case StartAction: + return driver.planStart(res) + default: + return &resource.Resource{}, nil + } +} + +func (driver *Driver) Sync(ctx context.Context, exr module.ExpandedResource) (*resource.State, error) { + modData, err := ReadTransientData(exr) + if err != nil { + return nil, err + } + + out, err := ReadOutputData(exr) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + conf, err := config.ReadConfig(exr.Resource, exr.Spec.Configs, driver.Conf) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + var kubeOut kubernetes.Output + if err := json.Unmarshal(exr.Dependencies[KeyKubeDependency].Output, &kubeOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid kube state").WithCausef(err.Error()) + } + + finalState := resource.State{ + Status: resource.StatusPending, + Output: exr.Resource.State.Output, + } + + if len(modData.PendingSteps) > 0 { + pendingStep := modData.PendingSteps[0] + modData.PendingSteps = modData.PendingSteps[1:] + switch pendingStep { + case Create: + if err := driver.create(ctx, exr.Resource, conf, kubeOut); err != nil { + return nil, err + } + case Suspend: + if 
err := driver.suspend(ctx, conf, kubeOut); err != nil { + return nil, err + } + case Delete: + if err := driver.delete(ctx, conf, kubeOut); err != nil { + return nil, err + } + case Start: + if err := driver.start(ctx, conf, kubeOut); err != nil { + return nil, err + } + default: + return nil, errors.ErrInternal.WithMsgf("unknown step: '%s'", pendingStep) + } + + immediately := time.Now() + finalState.NextSyncAt = &immediately + finalState.ModuleData = modules.MustJSON(modData) + + return &finalState, nil + } + + finalOut, err := driver.refreshOutput(ctx, *conf, *out, kubeOut) + if err != nil { + return nil, err + } + finalState.Output = finalOut + + finalState.Status = resource.StatusCompleted + finalState.ModuleData = nil + return &finalState, nil +} + +func (driver *Driver) Output(ctx context.Context, exr module.ExpandedResource) (json.RawMessage, error) { + output, err := ReadOutputData(exr) + if err != nil { + return nil, err + } + + conf, err := config.ReadConfig(exr.Resource, exr.Spec.Configs, driver.Conf) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + var kubeOut kubernetes.Output + if err := json.Unmarshal(exr.Dependencies[KeyKubeDependency].Output, &kubeOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("invalid kube state").WithCausef(err.Error()) + } + + return driver.refreshOutput(ctx, *conf, *output, kubeOut) +} diff --git a/modules/job/driver/log.go b/modules/job/driver/log.go new file mode 100644 index 00000000..38cba67a --- /dev/null +++ b/modules/job/driver/log.go @@ -0,0 +1,31 @@ +package driver + +import ( + "context" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules/job/config" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube/job" +) + +func (driver *Driver) Log(ctx context.Context, res module.ExpandedResource, filter map[string]string) (<-chan module.LogChunk, error) { + conf, err := config.ReadConfig(res.Resource, res.Spec.Configs, driver.Conf) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + + if filter == nil { + filter = map[string]string{} + } + filter["app"] = conf.Name + + var kubeOut kubernetes.Output + if err := json.Unmarshal(res.Dependencies[KeyKubeDependency].Output, &kubeOut); err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + j := &job.Job{Name: conf.Name, Namespace: conf.Namespace} + return driver.StreamLogs(ctx, kubeOut.Configs, j, filter) +} diff --git a/modules/job/driver/output.go b/modules/job/driver/output.go new file mode 100644 index 00000000..dc385aa6 --- /dev/null +++ b/modules/job/driver/output.go @@ -0,0 +1,42 @@ +package driver + +import ( + "context" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/job/config" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" + "github.com/goto/entropy/pkg/kube/job" +) + +type Output struct { + Namespace string `json:"namespace"` + JobName string `json:"jobName"` + Pods []kube.Pod `json:"pods"` +} + +func (driver *Driver) refreshOutput(ctx context.Context, conf config.Config, output Output, kubeOut kubernetes.Output) (json.RawMessage, error) { + j := &job.Job{Name: 
conf.Name, Namespace: conf.Namespace} + pods, err := driver.GetJobPods(ctx, kubeOut.Configs, j, map[string]string{"job-name": conf.Name}) + if err != nil { + return nil, errors.ErrInternal.WithCausef(err.Error()) + } + output.Pods = pods + + return modules.MustJSON(output), nil +} + +func ReadOutputData(exr module.ExpandedResource) (*Output, error) { + var curOut Output + if len(exr.Resource.State.Output) == 0 { + return &curOut, nil + } + if err := json.Unmarshal(exr.Resource.State.Output, &curOut); err != nil { + return nil, errors.ErrInternal.WithMsgf("corrupted output").WithCausef(err.Error()) + } + return &curOut, nil +} diff --git a/modules/job/driver/plan.go b/modules/job/driver/plan.go new file mode 100644 index 00000000..c3699d52 --- /dev/null +++ b/modules/job/driver/plan.go @@ -0,0 +1,76 @@ +package driver + +import ( + "time" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/modules/job/config" +) + +const ( + KeyKubeDependency = "kube_cluster" + SuspendAction = "suspend" + StartAction = "start" +) + +const ( + Create PendingStep = "create" + Suspend PendingStep = "suspend" + Delete PendingStep = "delete" + Start PendingStep = "start" +) + +type ( + PendingStep string + IgnoreError bool + TransientData struct { + PendingSteps []PendingStep `json:"pending_steps"` + } +) + +func (driver *Driver) planCreate(exr module.ExpandedResource, act module.ActionRequest) (*resource.Resource, error) { + conf, err := config.ReadConfig(exr.Resource, act.Params, driver.Conf) + if err != nil { + return nil, err + } + return planPendingWithConf(conf, exr, []PendingStep{Create}) +} + +func planPendingWithConf(conf *config.Config, exr module.ExpandedResource, steps []PendingStep) (*resource.Resource, error) { + immediately := time.Now() + exr.Resource.Spec.Configs = modules.MustJSON(conf) + exr.Resource.State = resource.State{ + Status: resource.StatusPending, + Output: modules.MustJSON(Output{ + Namespace: conf.Namespace, + JobName: conf.Name, + }), + NextSyncAt: &immediately, + ModuleData: modules.MustJSON(TransientData{ + PendingSteps: steps, + }), + } + return &exr.Resource, nil +} + +func (driver *Driver) planPendingWithExistingResource(exr module.ExpandedResource, step []PendingStep) (*resource.Resource, error) { + conf, err := config.ReadConfig(exr.Resource, exr.Resource.Spec.Configs, driver.Conf) + if err != nil { + return nil, err + } + return planPendingWithConf(conf, exr, step) +} + +func (driver *Driver) planDelete(exr module.ExpandedResource) (*resource.Resource, error) { + return driver.planPendingWithExistingResource(exr, []PendingStep{Delete}) +} + +func (driver *Driver) planSuspend(exr module.ExpandedResource) (*resource.Resource, error) { + return driver.planPendingWithExistingResource(exr, []PendingStep{Suspend}) +} + +func (driver *Driver) planStart(exr module.ExpandedResource) (*resource.Resource, error) { + return driver.planPendingWithExistingResource(exr, []PendingStep{Start}) +} diff --git a/modules/job/driver/sync.go b/modules/job/driver/sync.go new file mode 100644 index 00000000..340247c6 --- /dev/null +++ b/modules/job/driver/sync.go @@ -0,0 +1,137 @@ +package driver + +import ( + "context" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/modules" + 
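// Sketch: how the pending-step queue declared in plan.go above appears to
// drive reconciliation. Plan() records the steps in TransientData and marks
// the resource pending; each Sync() pass pops exactly one step, executes it
// against the cluster, and schedules another sync until the queue is empty,
// after which the status flips to completed. The wrapper below is illustrative
// only; every name it uses comes from this driver package.
func examplePendingSuspend() resource.State {
    now := time.Now()
    return resource.State{
        Status:     resource.StatusPending,
        NextSyncAt: &now, // ask the core to sync again immediately
        ModuleData: modules.MustJSON(TransientData{PendingSteps: []PendingStep{Suspend}}),
    }
    // The next Sync() reads this ModuleData via ReadTransientData(), calls
    // SuspendJob, and re-queues the (now empty) remainder; the sync after that
    // returns resource.StatusCompleted with ModuleData cleared.
}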
"github.com/goto/entropy/modules/job/config" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube/container" + "github.com/goto/entropy/pkg/kube/job" + "github.com/goto/entropy/pkg/kube/pod" + "github.com/goto/entropy/pkg/kube/volume" +) + +const ( + labelOrchestrator = "orchestrator" + labelName = "name" + orchestratorLabelValue = "entropy" + // Num retries before failing. + backoffLimit int32 = 0 +) + +func (driver *Driver) create(ctx context.Context, r resource.Resource, config *config.Config, out kubernetes.Output) error { + j := getJob(r, config) + if err := driver.CreateJob(ctx, out.Configs, j); err != nil { + return errors.ErrInternal.WithCausef(err.Error()) + } + return nil +} + +func (driver *Driver) suspend(ctx context.Context, config *config.Config, out kubernetes.Output) error { + j := &job.Job{Name: config.Name, Namespace: config.Namespace} + if err := driver.SuspendJob(ctx, out.Configs, j); err != nil { + return errors.ErrInternal.WithCausef(err.Error()) + } + return nil +} + +func (driver *Driver) delete(ctx context.Context, config *config.Config, out kubernetes.Output) error { + j := &job.Job{Name: config.Name, Namespace: config.Namespace} + if err := driver.DeleteJob(ctx, out.Configs, j); err != nil { + return errors.ErrInternal.WithCausef(err.Error()) + } + return nil +} + +func (driver *Driver) start(ctx context.Context, config *config.Config, out kubernetes.Output) error { + j := &job.Job{Name: config.Name, Namespace: config.Namespace} + if err := driver.StartJob(ctx, out.Configs, j); err != nil { + return errors.ErrInternal.WithCausef(err.Error()) + } + return nil +} + +func getJob(res resource.Resource, conf *config.Config) *job.Job { + constantLabels := map[string]string{ + labelOrchestrator: orchestratorLabelValue, + labelName: res.Name, + } + + var volumes []volume.Volume + for _, v := range conf.Volumes { + k := volume.Secret + if v.Kind == "configMap" { + k = volume.ConfigMap + } + volumes = append(volumes, volume.Volume{ + Kind: k, + Name: v.Name, + SourceName: v.Name, + }) + } + var containers []container.Container + for _, c := range conf.Containers { + var vm []container.VolumeMount + for _, s := range c.SecretsVolumes { + vm = append(vm, container.VolumeMount{ + Name: s.Name, + MountPath: s.Mount, + }) + } + for _, cm := range c.ConfigMapsVolumes { + vm = append(vm, container.VolumeMount{ + Name: cm.Name, + MountPath: cm.Mount, + }) + } + containers = append(containers, container.Container{ + Image: c.Image, + Name: c.Name, + EnvConfigMaps: c.EnvConfigMaps, + Command: c.Command, + Args: c.Args, + EnvMap: c.EnvVariables, + ImagePullPolicy: c.ImagePullPolicy, + VolumeMounts: vm, + PreStopCmd: c.PreStopCmd, + PostStartCmd: c.PostStartCmd, + Requests: map[string]string{"cpu": c.Requests.CPU, "memory": c.Requests.Memory}, + Limits: map[string]string{"cpu": c.Limits.CPU, "memory": c.Limits.Memory}, + }) + } + p := &pod.Pod{ + Name: conf.Name, + Containers: containers, + Volumes: volumes, + // This label is to support `app` filter on pod for getting the logs until we find better solution + Labels: map[string]string{"app": conf.Name}, + } + limit := backoffLimit + j := &job.Job{ + Pod: p, + Name: conf.Name, + Namespace: conf.Namespace, + Labels: modules.CloneAndMergeMaps(constantLabels, conf.JobLabels), + Parallelism: &conf.Replicas, + BackOffList: &limit, + TTLSeconds: conf.TTLSeconds, + } + return j 
+} + +func ReadTransientData(exr module.ExpandedResource) (*TransientData, error) { + if len(exr.Resource.State.ModuleData) == 0 { + return &TransientData{}, nil + } + + var modData TransientData + if err := json.Unmarshal(exr.Resource.State.ModuleData, &modData); err != nil { + return nil, errors.ErrInternal.WithMsgf("corrupted transient data").WithCausef(err.Error()) + } + return &modData, nil +} diff --git a/modules/job/module.go b/modules/job/module.go new file mode 100644 index 00000000..217379bb --- /dev/null +++ b/modules/job/module.go @@ -0,0 +1,151 @@ +package job + +import ( + "context" + "encoding/json" + + v1 "k8s.io/api/core/v1" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules/job/config" + "github.com/goto/entropy/modules/job/driver" + "github.com/goto/entropy/modules/kubernetes" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" + "github.com/goto/entropy/pkg/kube/job" + "github.com/goto/entropy/pkg/validator" +) + +var defaultDriverConf = config.DriverConf{ + Namespace: config.Default, + RequestsAndLimits: map[string]config.RequestsAndLimits{ + config.Default: { + Limits: config.UsageSpec{ + CPU: "1", + Memory: "2000Mi", + }, + Requests: config.UsageSpec{ + CPU: "1", + Memory: "2000Mi", + }, + }, + }, +} + +var Module = module.Descriptor{ + Kind: "job", + Dependencies: map[string]string{ + driver.KeyKubeDependency: kubernetes.Module.Kind, + }, + Actions: []module.ActionDesc{ + { + Name: module.CreateAction, + Description: "Creates a new Kube job.", + }, + { + Name: driver.SuspendAction, + Description: "Suspend the kube Job.", + }, + { + Name: driver.StartAction, + Description: "Start the kube Job.", + }, + { + Name: module.DeleteAction, + Description: "Delete the kube Job.", + }, + }, + DriverFactory: func(confJSON json.RawMessage) (module.Driver, error) { + conf := defaultDriverConf + if err := json.Unmarshal(confJSON, &conf); err != nil { + return nil, err + } else if err := validator.TaggedStruct(conf); err != nil { + return nil, err + } + return &driver.Driver{ + Conf: conf, + CreateJob: func(ctx context.Context, conf kube.Config, j *job.Job) error { + kubeCl, err := kube.NewClient(ctx, conf) + if err != nil { + return errors.ErrInternal.WithMsgf("failed to create new kube client on job driver").WithCausef(err.Error()) + } + processor, err := kubeCl.GetJobProcessor(j) + if err != nil { + return err + } + return processor.SubmitJob() + }, + SuspendJob: func(ctx context.Context, conf kube.Config, j *job.Job) error { + kubeCl, err := kube.NewClient(ctx, conf) + if err != nil { + return errors.ErrInternal.WithMsgf("failed to suspend the job").WithCausef(err.Error()) + } + processor, err := kubeCl.GetJobProcessor(j) + if err != nil { + return err + } + return processor.UpdateJob(true) + }, + DeleteJob: func(ctx context.Context, conf kube.Config, j *job.Job) error { + kubeCl, err := kube.NewClient(ctx, conf) + if err != nil { + return errors.ErrInternal.WithMsgf("failed to delete the job").WithCausef(err.Error()) + } + processor, err := kubeCl.GetJobProcessor(j) + if err != nil { + return err + } + return processor.DeleteJob() + }, + StartJob: func(ctx context.Context, conf kube.Config, j *job.Job) error { + kubeCl, err := kube.NewClient(ctx, conf) + if err != nil { + return errors.ErrInternal.WithMsgf("failed to start the job").WithCausef(err.Error()) + } + processor, err := kubeCl.GetJobProcessor(j) + if err != 
nil { + return err + } + return processor.UpdateJob(false) + }, + GetJobPods: func(ctx context.Context, kubeConf kube.Config, j *job.Job, labels map[string]string) ([]kube.Pod, error) { + kubeCl, err := kube.NewClient(ctx, kubeConf) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("failed to create new kube client on driver").WithCausef(err.Error()) + } + return kubeCl.GetPodDetails(ctx, j.Namespace, labels, func(pod v1.Pod) bool { + // allow all pods + return true + }) + }, + StreamLogs: func(ctx context.Context, kubeConf kube.Config, j *job.Job, filter map[string]string) (<-chan module.LogChunk, error) { + kubeCl, err := kube.NewClient(ctx, kubeConf) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("failed to create new kube client on firehose driver Log").WithCausef(err.Error()) + } + + logs, err := kubeCl.StreamLogs(ctx, j.Namespace, filter) + if err != nil { + return nil, err + } + + mappedLogs := make(chan module.LogChunk) + go func() { + defer close(mappedLogs) + for { + select { + case log, ok := <-logs: + if !ok { + return + } + mappedLogs <- module.LogChunk{Data: log.Data, Labels: log.Labels} + case <-ctx.Done(): + return + } + } + }() + return mappedLogs, err + }, + }, nil + }, +} diff --git a/modules/kafka/config.go b/modules/kafka/config.go new file mode 100644 index 00000000..b9fb1784 --- /dev/null +++ b/modules/kafka/config.go @@ -0,0 +1,68 @@ +package kafka + +import ( + _ "embed" + "encoding/json" + + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/validator" +) + +var ( + //go:embed schema/config.json + configSchemaRaw []byte + validateConfig = validator.FromJSONSchema(configSchemaRaw) +) + +type Config struct { + Entity string `json:"entity,omitempty"` + Environment string `json:"environment,omitempty"` + Landscape string `json:"landscape,omitempty"` + Organization string `json:"organization,omitempty"` + AdvertiseMode AdvertiseMode `json:"advertise_mode"` + Brokers []Broker `json:"brokers,omitempty"` + Type string `json:"type"` +} + +type AdvertiseMode struct { + Host string `json:"host"` + Address string `json:"address"` +} + +type Broker struct { + Name string `json:"name"` + Host string `json:"host"` + Address string `json:"address"` +} + +func readConfig(res resource.Resource, confJSON json.RawMessage, dc driverConf) (*Config, error) { + cfg := Config{ + Type: dc.Type, + Entity: dc.Entity, + Organization: dc.Organization, + Landscape: dc.Landscape, + Environment: dc.Environment, + } + + if res.Spec.Configs != nil { + if err := json.Unmarshal(res.Spec.Configs, &cfg); err != nil { + return nil, errors.ErrInvalid.WithMsgf("failed to unmarshal").WithCausef(err.Error()) + } + } + + if err := json.Unmarshal(confJSON, &cfg); err != nil { + return nil, errors.ErrInvalid.WithMsgf("failed to unmarshal").WithCausef(err.Error()) + } + + newConfJSON, err := json.Marshal(cfg) + if err != nil { + return nil, errors.ErrInvalid.WithMsgf("failed to marshal").WithCausef(err.Error()) + } + + if err := validateConfig(newConfJSON); err != nil { + return nil, err + } + + return &cfg, nil +} diff --git a/modules/kafka/driver.go b/modules/kafka/driver.go new file mode 100644 index 00000000..20ea9d86 --- /dev/null +++ b/modules/kafka/driver.go @@ -0,0 +1,98 @@ +package kafka + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + 
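// Sketch: the merge order implemented by readConfig in modules/kafka/config.go
// above. Driver-level defaults (type, entity, organization, landscape,
// environment) are applied first, any existing res.Spec.Configs is layered on
// top, then the incoming action params, and the merged document is validated
// against schema/config.json ("type" and "brokers" are required). All literal
// values below are hypothetical.
func exampleKafkaConfig(res resource.Resource) (*Config, error) {
    dc := driverConf{Type: "source", Environment: "integration"}
    params := json.RawMessage(`{
        "type": "source",
        "advertise_mode": {"address": "9092"},
        "brokers": [
            {"name": "kafka-0", "host": "kafka-0.internal", "address": "10.0.0.5"},
            {"name": "kafka-1", "host": "kafka-1.internal", "address": "10.0.0.6"}
        ]
    }`)
    // With this config, the driver's output URL (see mapUrl in driver.go of
    // this package) would be "10.0.0.5:9092,10.0.0.6:9092", since
    // advertise_mode.address selects broker addresses and is itself used as
    // the port.
    return readConfig(res, params, dc)
}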
"github.com/goto/entropy/modules" +) + +var defaultDriverConf = driverConf{ + Type: "source", +} + +type kafkaDriver struct { + conf driverConf +} + +type Output struct { + URL string `json:"url"` +} + +type driverConf struct { + Type string `json:"type"` + Entity string `json:"entity"` + Organization string `json:"organization"` + Landscape string `json:"landscape"` + Environment string `json:"environment"` +} + +func (m *kafkaDriver) Plan(ctx context.Context, res module.ExpandedResource, + act module.ActionRequest, +) (*resource.Resource, error) { + cfg, err := readConfig(res.Resource, act.Params, m.conf) + if err != nil { + return nil, err + } + + res.Resource.Spec = resource.Spec{ + Configs: modules.MustJSON(cfg), + Dependencies: nil, + } + + res.Resource.State = resource.State{ + Status: resource.StatusCompleted, + Output: modules.MustJSON(Output{ + URL: mapUrl(cfg), + }), + } + + return &res.Resource, nil +} + +func (*kafkaDriver) Sync(_ context.Context, res module.ExpandedResource) (*resource.State, error) { + return &resource.State{ + Status: resource.StatusCompleted, + Output: res.Resource.State.Output, + ModuleData: nil, + }, nil +} + +func (m *kafkaDriver) Output(ctx context.Context, res module.ExpandedResource) (json.RawMessage, error) { + cfg, err := readConfig(res.Resource, res.Resource.Spec.Configs, m.conf) + if err != nil { + return nil, err + } + + return modules.MustJSON(Output{ + URL: mapUrl(cfg), + }), nil +} + +func mapUrl(cfg *Config) string { + var mode, port string + if cfg.AdvertiseMode.Address != "" { + mode = "address" + port = cfg.AdvertiseMode.Address + } else { + mode = "host" + port = cfg.AdvertiseMode.Host + } + + var urls []string + for _, broker := range cfg.Brokers { + var addr string + if mode == "address" { + addr = broker.Address + } else { + addr = broker.Host + } + urls = append(urls, fmt.Sprintf("%s:%s", addr, port)) + } + + return strings.Join(urls, ",") +} diff --git a/modules/kafka/module.go b/modules/kafka/module.go new file mode 100644 index 00000000..9c042da4 --- /dev/null +++ b/modules/kafka/module.go @@ -0,0 +1,29 @@ +package kafka + +import ( + "encoding/json" + + "github.com/goto/entropy/core/module" +) + +var Module = module.Descriptor{ + Kind: "kafka", + Actions: []module.ActionDesc{ + { + Name: module.CreateAction, + }, + { + Name: module.UpdateAction, + }, + }, + DriverFactory: func(confJSON json.RawMessage) (module.Driver, error) { + conf := defaultDriverConf + if err := json.Unmarshal(confJSON, &conf); err != nil { + return nil, err + } + + return &kafkaDriver{ + conf: conf, + }, nil + }, +} diff --git a/modules/kafka/schema/config.json b/modules/kafka/schema/config.json new file mode 100644 index 00000000..3977ae43 --- /dev/null +++ b/modules/kafka/schema/config.json @@ -0,0 +1,53 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": ["type", "brokers"], + "properties": { + "type": { + "type": "string" + }, + "advertise_mode": { + "type": "object", + "additionalProperties": true, + "properties": { + "host": { + "type": "string" + }, + "address": { + "type": "string" + } + } + }, + "brokers": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "host": { + "type": "string" + }, + "address": { + "type": "string" + } + }, + "required": ["name", "host", "address"] + } + }, + "entity": { + "type": "string" + }, + "environment": { + "type": "string" + }, + 
"landscape": { + "type": "string" + }, + "organization": { + "type": "string" + } + } +} \ No newline at end of file diff --git a/modules/kubernetes/config_schema.json b/modules/kubernetes/config_schema.json deleted file mode 100644 index 89e55ed5..00000000 --- a/modules/kubernetes/config_schema.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "host": { - "type": "string", - "format": "uri" - }, - "insecure": { - "type": "boolean", - "default": false - }, - "token": { - "type": "string" - }, - "client_key": { - "type": "string" - }, - "client_certificate": { - "type": "string" - }, - "client_ca_certificate": { - "type": "string" - } - }, - "required": [ - "host" - ], - "anyOf": [ - { - "required": [ - "token" - ] - }, - { - "required": [ - "client_key", - "client_certificate" - ], - "if": { - "not": { - "properties": { - "insecure": { - "const": true - } - } - } - }, - "then": { - "required": [ - "client_ca_certificate" - ] - } - } - ] -} \ No newline at end of file diff --git a/modules/kubernetes/config_schema_test.go b/modules/kubernetes/config_schema_test.go deleted file mode 100644 index ab3e8a58..00000000 --- a/modules/kubernetes/config_schema_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package kubernetes - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/xeipuuv/gojsonschema" -) - -func TestModule_KubernetesJSONSchema(t *testing.T) { - tests := []struct { - title string - Case string - shouldBeValid bool - }{ - { - title: "TokenAuthPresent_InsecureTrue", - Case: `{ - "host": "http://0.0.0.0:1234", - "insecure": true, - "token": "token" - }`, - shouldBeValid: true, - }, - { - title: "TokenAuthPresent_InsecureFalse", - Case: `{ - "host": "http://0.0.0.0:1234", - "insecure": false, - "token": "foo" - }`, - shouldBeValid: true, - }, - { - title: "TokenAuthPresent_CertIsPresentToo", - Case: `{ - "host": "http://0.0.0.0:1234", - "insecure": false, - "token": "token", - "cluster_certificate": "c_ca_cert" - }`, - shouldBeValid: true, - }, - { - title: "CertAuthPresent_InsecureTrue", - Case: `{ - "host": "http://0.0.0.0:1234", - "insecure": true, - "client_key": "c_key", - "client_certificate": "c_cert" - }`, - shouldBeValid: true, - }, - { - title: "CertAuthPresent_InsecureFalse", - Case: `{ - "host": "http://0.0.0.0:1234", - "insecure": false, - "client_key": "c_key", - "client_certificate": "c_cert" - }`, - shouldBeValid: false, - }, - - { - title: "CertAuthPresent_InsecureFalse_WithCACert", - Case: `{ - "host": "http://0.0.0.0:1234", - "insecure": false, - "client_key": "c_key", - "client_certificate": "c_cert", - "client_ca_certificate": "ca_cert" - }`, - shouldBeValid: true, - }, - { - title: "Missing_ClientCert", - Case: `{ - "host": "http://0.0.0.0:1234", - "client_key": "c_key" - }`, - shouldBeValid: false, - }, - { - title: "Missing_ClientKey", - Case: `{ - "host": "http://0.0.0.0:1234", - "insecure": true, - "client_certificate": "c_cert" - }`, - shouldBeValid: false, - }, - { - title: "Missing_CACert", - Case: `{ - "host": "http://0.0.0.0:1234", - "insecure": false, - "client_key": "foo", - "client_certificate": "c_cert" - }`, - shouldBeValid: false, - }, - } - - schema, err := gojsonschema.NewSchema(gojsonschema.NewStringLoader(configSchema)) - require.NoError(t, err) - - for _, tt := range tests { - tt := tt - t.Run(tt.title, func(t *testing.T) { - t.Parallel() - - c := 
gojsonschema.NewStringLoader(tt.Case) - result, err := schema.Validate(c) - require.NoError(t, err) - assert.Equal(t, tt.shouldBeValid, result.Valid()) - }) - } -} diff --git a/modules/kubernetes/driver.go b/modules/kubernetes/driver.go new file mode 100644 index 00000000..2b185b29 --- /dev/null +++ b/modules/kubernetes/driver.go @@ -0,0 +1,82 @@ +package kubernetes + +import ( + "context" + "encoding/json" + + "k8s.io/client-go/kubernetes" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/core/resource" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" +) + +type kubeDriver struct { + TolerationMode map[string]string `json:"toleration_mode"` + Tolerations map[string][]Toleration `json:"tolerations"` + AffinityMode map[string]string `json:"affinity_mode"` + Affinities map[string]NodeAffinityMatchExpressions `json:"affinities"` +} + +func (m *kubeDriver) Plan(ctx context.Context, res module.ExpandedResource, + act module.ActionRequest, +) (*resource.Resource, error) { + res.Resource.Spec = resource.Spec{ + Configs: act.Params, + Dependencies: nil, + } + + output, err := m.Output(ctx, res) + if err != nil { + return nil, err + } + + res.Resource.State = resource.State{ + Status: resource.StatusCompleted, + Output: output, + } + + return &res.Resource, nil +} + +func (*kubeDriver) Sync(_ context.Context, res module.ExpandedResource) (*resource.State, error) { + return &resource.State{ + Status: resource.StatusCompleted, + Output: res.Resource.State.Output, + ModuleData: nil, + }, nil +} + +func (m *kubeDriver) Output(ctx context.Context, res module.ExpandedResource) (json.RawMessage, error) { + conf := kube.DefaultClientConfig() + if err := json.Unmarshal(res.Spec.Configs, &conf); err != nil { + return nil, errors.ErrInvalid.WithMsgf("invalid json config value").WithCausef(err.Error()) + } else if err := conf.Sanitise(); err != nil { + return nil, err + } + + restConfig, err := conf.RESTConfig(ctx) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("failed to create new kube client on kube driver output").WithCausef(err.Error()) + } + + clientSet, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, errors.ErrInvalid.WithMsgf("failed to create client: %v", err) + } + + info, err := clientSet.ServerVersion() + if err != nil { + return nil, errors.ErrInvalid.WithMsgf("failed to fetch server info: %v", err) + } + + return Output{ + Configs: conf, + ServerInfo: *info, + TolerationMode: m.TolerationMode, + Tolerations: m.Tolerations, + AffinityMode: m.AffinityMode, + Affinities: m.Affinities, + }.JSON(), nil +} diff --git a/modules/kubernetes/kubernetes.go b/modules/kubernetes/kubernetes.go deleted file mode 100644 index 733899b9..00000000 --- a/modules/kubernetes/kubernetes.go +++ /dev/null @@ -1,98 +0,0 @@ -package kubernetes - -import ( - "context" - _ "embed" - "encoding/json" - - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/kubernetes" - - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/core/resource" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/kube" -) - -//go:embed config_schema.json -var configSchema string - -var Module = module.Descriptor{ - Kind: "kubernetes", - Actions: []module.ActionDesc{ - { - Name: module.CreateAction, - ParamSchema: configSchema, - }, - { - Name: module.UpdateAction, - ParamSchema: configSchema, - }, - }, - DriverFactory: 
func(conf json.RawMessage) (module.Driver, error) { - return &kubeModule{}, nil - }, -} - -type kubeModule struct{} - -type Output struct { - Configs kube.Config `json:"configs"` - ServerInfo version.Info `json:"server_info"` -} - -func (m *kubeModule) Plan(ctx context.Context, res module.ExpandedResource, act module.ActionRequest) (*module.Plan, error) { - res.Resource.Spec = resource.Spec{ - Configs: act.Params, - Dependencies: nil, - } - - output, err := m.Output(ctx, res) - if err != nil { - return nil, err - } - - res.Resource.State = resource.State{ - Status: resource.StatusCompleted, - Output: output, - } - return &module.Plan{Resource: res.Resource, Reason: "kubernetes cluster details updated"}, nil -} - -func (*kubeModule) Sync(_ context.Context, res module.ExpandedResource) (*resource.State, error) { - return &resource.State{ - Status: resource.StatusCompleted, - Output: res.Resource.State.Output, - ModuleData: nil, - }, nil -} - -func (*kubeModule) Output(_ context.Context, res module.ExpandedResource) (json.RawMessage, error) { - conf := kube.DefaultClientConfig() - if err := json.Unmarshal(res.Spec.Configs, &conf); err != nil { - return nil, errors.ErrInvalid.WithMsgf("invalid json config value").WithCausef(err.Error()) - } - - clientSet, err := kubernetes.NewForConfig(conf.RESTConfig()) - if err != nil { - return nil, errors.ErrInvalid.WithMsgf("failed to create client: %v", err) - } - - info, err := clientSet.ServerVersion() - if err != nil { - return nil, errors.ErrInvalid.WithMsgf("failed to fetch server info: %v", err) - } - - return Output{ - Configs: conf, - ServerInfo: *info, - }.JSON(), nil -} - -func (out Output) JSON() []byte { - b, err := json.Marshal(out) - if err != nil { - panic(err) - } - return b -} diff --git a/modules/kubernetes/module.go b/modules/kubernetes/module.go new file mode 100644 index 00000000..ea28762e --- /dev/null +++ b/modules/kubernetes/module.go @@ -0,0 +1,29 @@ +package kubernetes + +import ( + _ "embed" + "encoding/json" + + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/pkg/errors" +) + +var Module = module.Descriptor{ + Kind: "kubernetes", + Actions: []module.ActionDesc{ + { + Name: module.CreateAction, + }, + { + Name: module.UpdateAction, + }, + }, + DriverFactory: func(conf json.RawMessage) (module.Driver, error) { + kd := &kubeDriver{} + err := json.Unmarshal(conf, &kd) + if err != nil { + return nil, errors.ErrInvalid.WithMsgf("failed to unmarshal module config: %v", err) + } + return kd, nil + }, +} diff --git a/modules/kubernetes/output.go b/modules/kubernetes/output.go new file mode 100644 index 00000000..70d5cc42 --- /dev/null +++ b/modules/kubernetes/output.go @@ -0,0 +1,49 @@ +package kubernetes + +import ( + "encoding/json" + + "k8s.io/apimachinery/pkg/version" + + "github.com/goto/entropy/pkg/kube" +) + +type Output struct { + Configs kube.Config `json:"configs"` + ServerInfo version.Info `json:"server_info"` + TolerationMode map[string]string `json:"toleration_mode"` + Tolerations map[string][]Toleration `json:"tolerations"` + AffinityMode map[string]string `json:"affinity_mode"` + Affinities map[string]NodeAffinityMatchExpressions `json:"affinities"` +} + +type Toleration struct { + Key string `json:"key"` + Value string `json:"value"` + Effect string `json:"effect"` + Operator string `json:"operator"` +} + +type NodeAffinityMatchExpressions struct { + RequiredDuringSchedulingIgnoredDuringExecution []Preference 
`json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPreference `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +type WeightedPreference struct { + Weight int `json:"weight" validate:"required"` + Preference []Preference `json:"preference" validate:"required"` +} + +type Preference struct { + Key string `json:"key" validate:"required"` + Operator string `json:"operator" validate:"required"` + Values []string `json:"values"` +} + +func (out Output) JSON() []byte { + b, err := json.Marshal(out) + if err != nil { + panic(err) + } + return b +} diff --git a/modules/registry.go b/modules/registry.go index d507850c..6853399c 100644 --- a/modules/registry.go +++ b/modules/registry.go @@ -5,8 +5,8 @@ import ( "reflect" "sync" - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/pkg/errors" ) // Registry maintains a list of supported/enabled modules. diff --git a/modules/registry_test.go b/modules/registry_test.go index 7e4b388a..08e160e3 100644 --- a/modules/registry_test.go +++ b/modules/registry_test.go @@ -9,10 +9,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/odpf/entropy/core/mocks" - "github.com/odpf/entropy/core/module" - "github.com/odpf/entropy/modules" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/core/mocks" + "github.com/goto/entropy/core/module" + "github.com/goto/entropy/modules" + "github.com/goto/entropy/pkg/errors" ) func TestRegistry_GetDriver(t *testing.T) { @@ -70,10 +70,10 @@ func TestRegistry_GetDriver(t *testing.T) { func TestRegistry_Register(t *testing.T) { t.Parallel() - reg := &modules.Registry{} t.Run("FirstRegistration_NoError", func(t *testing.T) { t.Parallel() + reg := &modules.Registry{} desc := module.Descriptor{ Kind: "foo", DriverFactory: func(conf json.RawMessage) (module.Driver, error) { @@ -92,6 +92,7 @@ func TestRegistry_Register(t *testing.T) { t.Run("SecondRegistration_Conflict", func(t *testing.T) { t.Parallel() + reg := &modules.Registry{} desc := module.Descriptor{ Kind: "foo", DriverFactory: func(conf json.RawMessage) (module.Driver, error) { @@ -99,6 +100,10 @@ func TestRegistry_Register(t *testing.T) { }, } + // first attempt. + assert.NoError(t, reg.Register(desc)) + + // second attempt. 
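// Sketch: the module-level configuration shape the kubernetes DriverFactory
// above appears to accept; the keys mirror kubeDriver's json tags and are
// echoed back in Output (output.go). Pool names, taints and affinity terms are
// invented.
func exampleKubeModuleConf() (module.Driver, error) {
    conf := json.RawMessage(`{
        "toleration_mode": {"firehose": "dedicated"},
        "tolerations": {
            "dedicated": [
                {"key": "dedicated-to", "value": "firehose", "effect": "NoSchedule", "operator": "Equal"}
            ]
        },
        "affinity_mode": {"firehose": "general"},
        "affinities": {
            "general": {
                "preferredDuringSchedulingIgnoredDuringExecution": [
                    {"weight": 1, "preference": [{"key": "node-pool", "operator": "In", "values": ["general"]}]}
                ]
            }
        }
    }`)
    return kubernetes.Module.DriverFactory(conf)
}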
err := reg.Register(desc) assert.Error(t, err) assert.True(t, errors.Is(err, errors.ErrConflict)) @@ -119,6 +124,7 @@ func TestRegistry_Register(t *testing.T) { }, }, } + reg := &modules.Registry{} got := reg.Register(desc) assert.Error(t, got) assert.True(t, errors.Is(got, errors.ErrInvalid), cmp.Diff(got, errors.ErrInvalid)) diff --git a/modules/utils.go b/modules/utils.go new file mode 100644 index 00000000..9c5a5d04 --- /dev/null +++ b/modules/utils.go @@ -0,0 +1,89 @@ +package modules + +import ( + "crypto/md5" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "regexp" + "strings" +) + +const ( + MAX_NAME_LIMIT = 63 + RESOURCE_NAME_HASH_LEN = 8 +) + +func CloneAndMergeMaps(m1, m2 map[string]string) map[string]string { + res := map[string]string{} + for k, v := range m1 { + res[k] = v + } + for k, v := range m2 { + res[k] = v + } + return res +} + +func MustJSON(v any) json.RawMessage { + b, err := json.Marshal(v) + if err != nil { + panic(err) + } + return b +} + +func SafeName(name string, suffix string, maxLen int) string { + const randomHashLen = 6 + // remove suffix if already there. + name = strings.TrimSuffix(name, suffix) + + if len(name) <= maxLen-len(suffix) { + return name + suffix + } + + val := sha256.Sum256([]byte(name)) + hash := fmt.Sprintf("%x", val) + suffix = fmt.Sprintf("-%s%s", hash[:randomHashLen], suffix) + + // truncate and make room for the suffix. also trim any leading, trailing + // hyphens to prevent '--' (not allowed in deployment names). + truncLen := maxLen - len(suffix) + truncated := name[0:truncLen] + truncated = strings.Trim(truncated, "-") + return truncated + suffix +} + +func slug(input string) string { + return strings.ToLower( + regexp.MustCompile(`[^\w -]+`).ReplaceAllString( + regexp.MustCompile(` +`).ReplaceAllString( + regexp.MustCompile(`_+`).ReplaceAllString(input, "-"), + "-"), + "")) +} + +func BuildResourceName(kind, name, projectID string, limit int) string { + if limit == 0 { + limit = MAX_NAME_LIMIT + } + + nameComponents := []string{projectID, name, kind} + sluggedName := slug(name) + initialName := slug(strings.Join(nameComponents, "-")) + charSizeToRemove := limit - len(initialName) + hashLength := RESOURCE_NAME_HASH_LEN + + hash := md5.Sum([]byte(sluggedName)) + hashStr := hex.EncodeToString(hash[:])[:hashLength] + + var qualifiedName string + if charSizeToRemove >= 0 { + qualifiedName = sluggedName + } else { + qualifiedName = fmt.Sprintf("%s-%s", sluggedName[:len(sluggedName)+(charSizeToRemove+-(hashLength+1))], hashStr) + } + + return slug(strings.Join([]string{projectID, qualifiedName, kind}, "-")) +} diff --git a/modules/utils_test.go b/modules/utils_test.go new file mode 100644 index 00000000..c3d26498 --- /dev/null +++ b/modules/utils_test.go @@ -0,0 +1,20 @@ +package modules + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "gotest.tools/assert" +) + +func TestCloneAndMergeMaps(t *testing.T) { + m1 := map[string]string{"a1": "b1", "a2": "b2"} + m2 := map[string]string{"a3": "b3"} + m3 := CloneAndMergeMaps(m1, m2) + assert.Assert(t, cmp.Equal(map[string]string{"a1": "b1", "a2": "b2", "a3": "b3"}, m3)) +} + +func TestSafeName(t *testing.T) { + sName := SafeName("testing-1232-end", "-name", 50) + assert.Equal(t, "testing-1232-end-name", sName) +} diff --git a/pkg/common/common.go b/pkg/common/common.go new file mode 100644 index 00000000..3d1d59b3 --- /dev/null +++ b/pkg/common/common.go @@ -0,0 +1,25 @@ +package common + +import ( + "context" + + commonv1 
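// Sketch: what the helpers in modules/utils.go above do with short and long
// inputs. SafeName keeps short names intact and, for long ones, truncates and
// appends a sha256 fragment so the result still fits maxLen; BuildResourceName
// only falls back to hashing when the slugged name exceeds the limit. The
// names below are invented.
func exampleNames() {
    fmt.Println(SafeName("orders-enricher", "-firehose", 63))
    // -> "orders-enricher-firehose" (short enough, suffix simply appended)

    long := strings.Repeat("verylongname-", 8) // 104 chars
    fmt.Println(len(SafeName(long, "-firehose", 63)) <= 63) // -> true

    fmt.Println(BuildResourceName("job", "daily-report", "project-x", 63))
    // -> "project-x-daily-report-job" (hashed only when the limit is exceeded)
}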
"github.com/goto/entropy/proto/gotocompany/common/v1" +) + +type CommonService struct { + commonv1.UnimplementedCommonServiceServer + version *commonv1.Version +} + +func New(version *commonv1.Version) *CommonService { + return &CommonService{ + version: version, + } +} + +//nolint:unparam +func (c *CommonService) GetVersion(context.Context, *commonv1.GetVersionRequest) (*commonv1.GetVersionResponse, error) { + return &commonv1.GetVersionResponse{ + Server: c.version, + }, nil +} diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go index c29767fa..0bd6dd97 100644 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -3,7 +3,6 @@ package errors import ( "errors" "fmt" - "strings" ) // These aliased values are added to avoid conflicting imports of standard `errors` @@ -16,11 +15,11 @@ var ( // Common error categories. Use `ErrX.WithXXX()` to clone and add context. var ( - ErrInvalid = Error{Code: "bad_request", Message: "Request is not valid"} - ErrNotFound = Error{Code: "not_found", Message: "Requested entity not found"} - ErrConflict = Error{Code: "conflict", Message: "An entity with conflicting identifier exists"} - ErrInternal = Error{Code: "internal_error", Message: "Some unexpected error occurred"} - ErrUnsupported = Error{Code: "unsupported", Message: "Requested feature is not supported"} + ErrInvalid = Error{Code: "bad_request", Message: "request is not valid"} + ErrNotFound = Error{Code: "not_found", Message: "requested entity not found"} + ErrConflict = Error{Code: "conflict", Message: "an entity with conflicting identifier exists"} + ErrInternal = Error{Code: "internal_error", Message: "some unexpected error occurred"} + ErrUnsupported = Error{Code: "unsupported", Message: "requested feature is not supported"} ) // Error represents any error returned by the Entropy components along with any @@ -54,7 +53,7 @@ func (err Error) WithMsgf(format string, args ...interface{}) Error { // Is checks if 'other' is of type Error and has the same code. // See https://blog.golang.org/go1.13-errors. func (err Error) Is(other error) bool { - if oe, ok := other.(Error); ok { // nolint + if oe, ok := other.(Error); ok { return oe.Code == err.Code } @@ -63,10 +62,14 @@ func (err Error) Is(other error) bool { } func (err Error) Error() string { + msg := err.Code if err.Message != "" { - return strings.ToLower(err.Message) + msg += ": " + err.Message } - return fmt.Sprintf("%s: %s", err.Code, err.Cause) + if err.Cause != "" { + msg += ": " + err.Cause + } + return msg } // Errorf returns a formatted error similar to `fmt.Errorf` but uses the @@ -95,12 +98,3 @@ func E(err error) Error { } return ErrInternal.WithCausef(err.Error()) } - -// Verbose returns a verbose error value. 
-func Verbose(err error) error { - var e Error - if errors.As(err, &e) { - return e.WithMsgf("%s: %s (cause: %s)", e.Code, e.Message, e.Cause) - } - return err -} diff --git a/pkg/errors/errors_test.go b/pkg/errors/errors_test.go index 8665e046..3ac96ca1 100644 --- a/pkg/errors/errors_test.go +++ b/pkg/errors/errors_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" ) func Test_E(t *testing.T) { @@ -16,7 +16,7 @@ func Test_E(t *testing.T) { want := errors.Error{ Code: "internal_error", Cause: "some native error", - Message: "Some unexpected error occurred", + Message: "some unexpected error occurred", } err := errors.New("some native error") @@ -41,31 +41,11 @@ func Test_E(t *testing.T) { }) } -func Test_Verbose(t *testing.T) { - t.Parallel() - - t.Run("NonError", func(t *testing.T) { - err := errors.New("some native error") - - got := errors.Verbose(err) - assert.EqualError(t, got, "some native error") - }) - - t.Run("CustomError", func(t *testing.T) { - err := errors.ErrInvalid. - WithMsgf("request is not valid"). - WithCausef("invalid parameter value") - - got := errors.Verbose(err) - assert.EqualError(t, got, "bad_request: request is not valid (cause: invalid parameter value)") - }) -} - func Test_Errorf(t *testing.T) { t.Parallel() e := errors.Errorf("failed: %d", 100) assert.Error(t, e) - assert.EqualError(t, e, "failed: 100") + assert.EqualError(t, e, "internal_error: failed: 100") } func Test_OneOf(t *testing.T) { @@ -92,12 +72,12 @@ func TestError_Error(t *testing.T) { { title: "WithoutCause", err: errors.ErrInvalid, - want: "request is not valid", + want: "bad_request: request is not valid", }, { title: "WithCause", - err: errors.ErrInvalid.WithMsgf("").WithCausef("foo"), - want: "bad_request: foo", + err: errors.ErrInvalid.WithMsgf("").WithCausef("input has bad field"), + want: "bad_request: input has bad field", }, } @@ -129,7 +109,7 @@ func TestError_Is(t *testing.T) { { title: "NonEntropyErr", err: errors.ErrInternal, - other: goerrors.New("foo"), // nolint + other: goerrors.New("foo"), want: true, }, { @@ -163,7 +143,7 @@ func TestError_WithCausef(t *testing.T) { err: errors.ErrInvalid.WithCausef("foo"), want: errors.Error{ Code: "bad_request", - Message: "Request is not valid", + Message: "request is not valid", Cause: "foo", }, }, @@ -172,7 +152,7 @@ func TestError_WithCausef(t *testing.T) { err: errors.ErrConflict.WithCausef("hello %s", "world"), want: errors.Error{ Code: "conflict", - Message: "An entity with conflicting identifier exists", + Message: "an entity with conflicting identifier exists", Cause: "hello world", }, }, diff --git a/pkg/helm/client.go b/pkg/helm/client.go deleted file mode 100644 index d8101976..00000000 --- a/pkg/helm/client.go +++ /dev/null @@ -1,63 +0,0 @@ -package helm - -import ( - "github.com/mcuadros/go-defaults" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/cli" - apimachineryschema "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/clientcmd/api" - - "github.com/odpf/entropy/pkg/kube" -) - -type Config struct { - // HelmDriver - The backend storage driver. Values are - configmap, secret, memory, sql - HelmDriver string `default:"secret"` - // Kubernetes configuration. 
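// Sketch: what the reworked Error() above produces. The code is always
// present and message/cause are appended when set, which is why the Verbose()
// helper and the ToLower call could be dropped. Values are illustrative.
func exampleErrorStrings() {
    err := ErrInvalid.WithCausef("field %q must not be empty", "namespace")
    fmt.Println(err.Error())
    // -> bad_request: request is not valid: field "namespace" must not be empty

    fmt.Println(ErrInternal.WithMsgf("").Error())
    // -> internal_error
}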
- Kubernetes kube.Config -} - -type Client struct { - config *Config - cliSettings *cli.EnvSettings -} - -func DefaultClientConfig() *Config { - defaultProviderConfig := new(Config) - defaults.SetDefaults(defaultProviderConfig) - return defaultProviderConfig -} - -func NewClient(config *Config) *Client { - return &Client{config: config, cliSettings: cli.New()} -} - -func (p *Client) getActionConfiguration(namespace string) (*action.Configuration, error) { - actionConfig := new(action.Configuration) - - overrides := &clientcmd.ConfigOverrides{} - - overrides.AuthInfo.ClientCertificateData = []byte(p.config.Kubernetes.ClientCertificate) - overrides.AuthInfo.ClientKeyData = []byte(p.config.Kubernetes.ClientKey) - overrides.AuthInfo.Token = p.config.Kubernetes.Token - overrides.ClusterInfo.CertificateAuthorityData = []byte(p.config.Kubernetes.ClusterCACertificate) - overrides.ClusterInfo.InsecureSkipTLSVerify = p.config.Kubernetes.Insecure - - hasCA := len(overrides.ClusterInfo.CertificateAuthorityData) != 0 - hasCert := len(overrides.AuthInfo.ClientCertificateData) != 0 - defaultTLS := hasCA || hasCert || overrides.ClusterInfo.InsecureSkipTLSVerify - host, _, err := rest.DefaultServerURL(p.config.Kubernetes.Host, "", apimachineryschema.GroupVersion{}, defaultTLS) - if err != nil { - return nil, err - } - overrides.ClusterInfo.Server = host.String() - - clientConfig := clientcmd.NewDefaultClientConfig(*api.NewConfig(), overrides) - - if err := actionConfig.Init(&KubeConfig{ClientConfig: clientConfig}, namespace, p.config.HelmDriver, func(format string, v ...interface{}) {}); err != nil { - return nil, err - } - return actionConfig, nil -} diff --git a/pkg/helm/kube_rest.go b/pkg/helm/config.go similarity index 56% rename from pkg/helm/kube_rest.go rename to pkg/helm/config.go index 8741df20..b966ff82 100644 --- a/pkg/helm/kube_rest.go +++ b/pkg/helm/config.go @@ -1,47 +1,49 @@ package helm import ( - "sync" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/client-go/discovery" memcached "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" "k8s.io/client-go/tools/clientcmd" + + "github.com/goto/entropy/pkg/kube" ) -// KubeConfig is a RESTClientGetter interface implementation +// Config contains Helm CLI configuration parameters. +type Config struct { + HelmDriver string `default:"secret"` // values: configmap/secret/memory/sql + Kubernetes kube.Config +} + +// kubeClientGetter is a RESTClientGetter interface implementation // comes from https://github.com/hashicorp/terraform-provider-helm -type KubeConfig struct { +type kubeClientGetter struct { ClientConfig clientcmd.ClientConfig - - sync.Mutex } -// ToRESTConfig implemented interface method. -func (k *KubeConfig) ToRESTConfig() (*rest.Config, error) { +func (k *kubeClientGetter) ToRESTConfig() (*rest.Config, error) { config, err := k.ToRawKubeConfigLoader().ClientConfig() return config, err } -// ToDiscoveryClient implemented interface method. -func (k *KubeConfig) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { +func (k *kubeClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { config, err := k.ToRESTConfig() if err != nil { return nil, err } // The more groups you have, the more discovery requests you need to make. - // given 25 groups (our groups + a few custom resources) with one-ish version each, discovery needs to make 50 requests - // double it just so we don't end up here again for a while. 
This config is only used for discovery. + // given 25 groups (our groups + a few custom resources) with one-ish version + // each, discovery needs to make 50 requests double it just so we don't end + // up here again for a while. This config is only used for discovery. config.Burst = 100 return memcached.NewMemCacheClient(discovery.NewDiscoveryClientForConfigOrDie(config)), nil } -// ToRESTMapper implemented interface method. -func (k *KubeConfig) ToRESTMapper() (meta.RESTMapper, error) { +func (k *kubeClientGetter) ToRESTMapper() (meta.RESTMapper, error) { discoveryClient, err := k.ToDiscoveryClient() if err != nil { return nil, err @@ -52,7 +54,6 @@ func (k *KubeConfig) ToRESTMapper() (meta.RESTMapper, error) { return expander, nil } -// ToRawKubeConfigLoader implemented interface method. -func (k *KubeConfig) ToRawKubeConfigLoader() clientcmd.ClientConfig { +func (k *kubeClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig { return k.ClientConfig } diff --git a/pkg/helm/helm.go b/pkg/helm/helm.go new file mode 100644 index 00000000..6737e1a2 --- /dev/null +++ b/pkg/helm/helm.go @@ -0,0 +1,230 @@ +package helm + +import ( + "fmt" + "net/url" + "strings" + "time" + + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/release" + apimachineryschema "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" + + "github.com/goto/entropy/pkg/errors" +) + +type Client struct { + config *Config + cliSettings *cli.EnvSettings +} + +func (p *Client) Upsert(config *ReleaseConfig, canUpdateCheck func(rel *release.Release) bool) (*Result, error) { + actionConfig, err := p.getActionConfiguration(config.Namespace) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("error while getting action configuration : %s", err) + } + + rel, err := fetchRelease(actionConfig, config.Name) + if err != nil && !errors.Is(err, errors.ErrNotFound) { + return nil, errors.ErrInternal.WithMsgf("failed to find release").WithCausef(err.Error()) + } + + isCreate := rel == nil // release doesn't exist already. + canUpdate := !isCreate && canUpdateCheck(rel) // exists already and we are allowed to update. + if !isCreate && !canUpdate { + return nil, errors.ErrConflict.WithMsgf("release with same name exists, but update not possible") + } + + if isCreate { + // release does not exist. + return p.doCreate(actionConfig, config) + } + + // already exists and is updatable. 
+ return p.doUpdate(actionConfig, config) +} + +func (p *Client) Delete(config *ReleaseConfig) error { + actionConfig, err := p.getActionConfiguration(config.Namespace) + if err != nil { + return errors.ErrInternal.WithMsgf("error while getting action configuration : %s", err) + } + + act := action.NewUninstall(actionConfig) + if _, err := act.Run(config.Name); err != nil { + return errors.ErrInternal.WithMsgf("unable to uninstall release %s", err) + } + return nil +} + +func (p *Client) doCreate(actionConfig *action.Configuration, config *ReleaseConfig) (*Result, error) { + fetchedChart, chartPathOpts, err := p.getChart(config) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("error while getting chart").WithCausef(err.Error()) + } + + act := action.NewInstall(actionConfig) + act.Wait = true + act.IncludeCRDs = true + act.SkipCRDs = false + act.Timeout = time.Duration(config.Timeout) * time.Second + act.Replace = config.Replace + act.OutputDir = "" + act.Namespace = config.Namespace + act.ClientOnly = false + act.Description = config.Description + act.WaitForJobs = true + act.ReleaseName = config.Name + act.GenerateName = false + act.NameTemplate = "" + act.CreateNamespace = config.CreateNamespace + act.ChartPathOptions = *chartPathOpts + act.DryRun = false + + rel, err := act.Run(fetchedChart, config.Values) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("create-release failed").WithCausef(err.Error()) + } + + return &Result{ + Config: config, + Release: rel, + }, nil +} + +func (p *Client) doUpdate(actionConfig *action.Configuration, config *ReleaseConfig) (*Result, error) { + fetchedChart, chartPathOpts, err := p.getChart(config) + if err != nil { + return nil, errors.ErrInternal.WithMsgf("error while getting chart").WithCausef(err.Error()) + } + + act := action.NewUpgrade(actionConfig) + act.ChartPathOptions = *chartPathOpts + act.DryRun = false + act.Wait = config.Wait + act.WaitForJobs = config.WaitForJobs + act.Timeout = time.Duration(config.Timeout) * time.Second + act.Namespace = config.Namespace + act.Description = config.Description + act.MaxHistory = p.config.Kubernetes.HelmConfig.MaxHistory + + rel, err := act.Run(config.Name, fetchedChart, config.Values) + if err != nil { + if isReleaseNotFoundErr(err) { + return nil, errors.ErrNotFound. + WithMsgf("update-release failed"). 
+ WithCausef("release with given name not found") + } + return nil, errors.ErrInternal.WithMsgf("update-release failed").WithCausef(err.Error()) + } + + return &Result{ + Config: config, + Release: rel, + }, nil +} + +func (p *Client) getChart(config *ReleaseConfig) (*chart.Chart, *action.ChartPathOptions, error) { + repositoryURL, chartName := resolveChartName(config.Repository, strings.TrimSpace(config.Chart)) + + chartPathOpts := &action.ChartPathOptions{ + RepoURL: repositoryURL, + Version: getVersion(config.Version), + } + + // TODO: Add a lock as Load function blows up if accessed concurrently + path, err := chartPathOpts.LocateChart(chartName, p.cliSettings) + if err != nil { + return nil, nil, err + } + + fetchedChart, err := loader.Load(path) + if err != nil { + return nil, nil, err + } + + // TODO: check if chart has dependencies and load those dependencies + if fetchedChart.Metadata.Type != typeApplication { + return nil, nil, ErrChartNotApplication + } + + return fetchedChart, chartPathOpts, nil +} + +func (p *Client) getActionConfiguration(namespace string) (*action.Configuration, error) { + hasCA := len(p.config.Kubernetes.ClusterCACertificate) != 0 + hasCert := len(p.config.Kubernetes.ClientCertificate) != 0 + defaultTLS := hasCA || hasCert || p.config.Kubernetes.Insecure + host, _, err := rest.DefaultServerURL(p.config.Kubernetes.Host, "", apimachineryschema.GroupVersion{}, defaultTLS) + if err != nil { + return nil, err + } + + clientConfig := clientcmd.NewDefaultClientConfig(*api.NewConfig(), &clientcmd.ConfigOverrides{ + AuthInfo: api.AuthInfo{ + Token: p.config.Kubernetes.Token, + ClientKeyData: []byte(p.config.Kubernetes.ClientKey), + ClientCertificateData: []byte(p.config.Kubernetes.ClientCertificate), + }, + ClusterInfo: api.Cluster{ + Server: host.String(), + InsecureSkipTLSVerify: p.config.Kubernetes.Insecure, + CertificateAuthorityData: []byte(p.config.Kubernetes.ClusterCACertificate), + }, + }) + kubeConf := &kubeClientGetter{ClientConfig: clientConfig} + + actionConfig := &action.Configuration{} + if err := actionConfig.Init(kubeConf, namespace, p.config.HelmDriver, noOpLog); err != nil { + return nil, err + } + return actionConfig, nil +} + +func NewClient(config *Config) *Client { + return &Client{config: config, cliSettings: cli.New()} +} + +func fetchRelease(cfg *action.Configuration, name string) (*release.Release, error) { + get := action.NewGet(cfg) + res, err := get.Run(name) + if err != nil { + if isReleaseNotFoundErr(err) { + return nil, errors.ErrNotFound.WithCausef(err.Error()) + } + return nil, err + } + return res, nil +} + +func getVersion(version string) string { + if version == "" { + return ">0.0.0-0" + } + return strings.TrimSpace(version) +} + +func resolveChartName(repository, name string) (string, string) { + _, err := url.ParseRequestURI(repository) + if err == nil { + return repository, name + } + + if !strings.Contains(name, "/") && repository != "" { + name = fmt.Sprintf("%s/%s", repository, name) + } + + return "", name +} + +func isReleaseNotFoundErr(err error) bool { + return strings.Contains(err.Error(), "release: not found") +} + +func noOpLog(_ string, _ ...interface{}) {} diff --git a/pkg/helm/release.go b/pkg/helm/release.go index 94b20612..07aa9b8d 100644 --- a/pkg/helm/release.go +++ b/pkg/helm/release.go @@ -1,29 +1,19 @@ package helm import ( - "encoding/json" - "fmt" - "net/url" - "strings" - "time" - "github.com/mcuadros/go-defaults" - "helm.sh/helm/v3/pkg/action" - "helm.sh/helm/v3/pkg/chart" - 
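// Sketch: how a caller might use the new create-or-update entry point in
// helm.go above. The kube config, chart coordinates, values and the
// canUpdateCheck policy are all hypothetical; the point is that Upsert decides
// between install and upgrade based on whether the release already exists and
// on the caller-supplied predicate.
func exampleUpsert(kubeConf kube.Config) (*Result, error) {
    cl := NewClient(&Config{HelmDriver: "secret", Kubernetes: kubeConf})

    rc := DefaultReleaseConfig()
    rc.Name = "demo-firehose"
    rc.Repository = "https://example.org/charts"
    rc.Chart = "firehose"
    rc.Namespace = "default"
    rc.Values = map[string]interface{}{"replicaCount": 1}

    return cl.Upsert(rc, func(rel *release.Release) bool {
        // only upgrade releases that are not in a failed state (assumed policy)
        return rel.Info.Status != release.StatusFailed
    })
}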
"helm.sh/helm/v3/pkg/chart/loader" "helm.sh/helm/v3/pkg/release" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" ) var ( typeApplication = "application" - ErrReleaseNotFound = errors.New("release not found") ErrChartNotApplication = errors.New("helm chart is not an application chart") ) type ReleaseConfig struct { - // Name - Release Name + // Name - Result Name Name string `json:"name" mapstructure:"name"` // Repository - Repository where to locate the requested chart. If is a URL the chart is installed without installing the repository. Repository string `json:"repository" mapstructure:"repository"` @@ -53,16 +43,9 @@ type ReleaseConfig struct { CreateNamespace bool `json:"create_namespace" mapstructure:"create_namespace" default:"false"` } -type Release struct { - Config *ReleaseConfig - Output ReleaseOutput -} - -type ReleaseOutput struct { - // Status - Status of the release. - Status Status - // Revision - Revision of the release. - Release string +type Result struct { + Config *ReleaseConfig + Release *release.Release } func DefaultReleaseConfig() *ReleaseConfig { @@ -70,260 +53,3 @@ func DefaultReleaseConfig() *ReleaseConfig { defaults.SetDefaults(defaultReleaseConfig) return defaultReleaseConfig } - -// Create - creates a helm release with its configs. -func (p *Client) Create(config *ReleaseConfig) (*Release, error) { - actionConfig, err := p.getActionConfiguration(config.Namespace) - if err != nil { - return nil, errors.ErrInternal.WithMsgf("error while getting action configuration: %s", err) - } - - chartPathOptions, chartName := p.chartPathOptions(config) - - fetchedChart, err := p.getChart(chartName, chartPathOptions) - if err != nil { - return nil, errors.ErrInternal.WithMsgf("error while getting chart: %s", err) - } - - // TODO: check if chart has dependencies and load those dependencies - - if fetchedChart.Metadata.Type != typeApplication { - return nil, ErrChartNotApplication - } - - client := action.NewInstall(actionConfig) - client.ChartPathOptions = *chartPathOptions - client.ClientOnly = false - client.DryRun = false - client.Wait = config.Wait - client.WaitForJobs = config.WaitForJobs - client.Timeout = time.Second * time.Duration(config.Timeout) - client.Namespace = config.Namespace - client.ReleaseName = config.Name - client.GenerateName = false - client.NameTemplate = "" - client.OutputDir = "" - client.Replace = config.Replace - client.Description = config.Description - client.CreateNamespace = config.CreateNamespace - - rel, err := client.Run(fetchedChart, config.Values) - if err != nil && rel == nil { - return nil, errors.ErrInternal.WithMsgf("error while installing release: %s", err) - } - - if err != nil && rel != nil { - releaseExists, releaseErr := p.resourceReleaseExists(config.Name, config.Namespace) - - if releaseErr != nil { - return nil, errors.ErrConflict.WithMsgf("release already exists: %s", releaseErr) - } - - if !releaseExists { - return nil, errors.ErrNotFound.WithMsgf("release doesn't exists: %s", err) - } - - releaseJSON, err := json.Marshal(rel) - if err != nil { - return nil, errors.ErrInternal.WithMsgf("error while json marshalling release: %s", err) - } - - return &Release{ - Config: config, - Output: ReleaseOutput{ - Status: mapReleaseStatus(rel.Info.Status), - Release: string(releaseJSON), - }, - }, errors.ErrInternal.WithMsgf("helm release created with failure: %s", err) - } - - releaseJSON, err := json.Marshal(rel) - if err != nil { - return nil, 
errors.ErrInternal.WithMsgf("error while json marshalling release: %s", err) - } - - return &Release{ - Config: config, - Output: ReleaseOutput{ - Status: mapReleaseStatus(rel.Info.Status), - Release: string(releaseJSON), - }, - }, nil -} - -// Update - updates a helm release with its configs. -func (p *Client) Update(config *ReleaseConfig) (*Release, error) { - var rel *release.Release - - actionConfig, err := p.getActionConfiguration(config.Namespace) - if err != nil { - return nil, errors.ErrInternal.WithMsgf("error while getting action configuration : %s", err) - } - - chartPathOptions, chartName := p.chartPathOptions(config) - - fetchedChart, err := p.getChart(chartName, chartPathOptions) - if err != nil { - return nil, errors.ErrInternal.WithMsgf("error while getting fetchedChart : %s", err) - } - - // TODO: check if fetchedChart has dependencies and load those dependencies - - if fetchedChart.Metadata.Type != typeApplication { - return nil, ErrChartNotApplication - } - - client := action.NewUpgrade(actionConfig) - client.ChartPathOptions = *chartPathOptions - client.DryRun = false - client.Wait = config.Wait - client.WaitForJobs = config.WaitForJobs - client.Timeout = time.Second * time.Duration(config.Timeout) - client.Namespace = config.Namespace - client.Description = config.Description - - rel, err = client.Run(config.Name, fetchedChart, config.Values) - if err != nil && rel == nil { - return nil, errors.ErrInternal.WithMsgf("error while updating release: %s", err) - } - - if err != nil && rel != nil { - releaseExists, _ := p.resourceReleaseExists(config.Name, config.Namespace) - - if !releaseExists { - return nil, errors.ErrNotFound.WithMsgf("release doesn't exists: %s", err) - } - - releaseJSON, jsonErr := json.Marshal(rel) - if jsonErr != nil { - return nil, errors.ErrInternal.WithMsgf("error while json marshalling release: %s", err) - } - - return &Release{ - Config: config, - Output: ReleaseOutput{ - Status: mapReleaseStatus(rel.Info.Status), - Release: string(releaseJSON), - }, - }, errors.ErrInternal.WithMsgf("helm release updated with failure: %s", err) - } - - releaseJSON, err := json.Marshal(rel) - if err != nil { - return nil, errors.ErrInternal.WithMsgf("error while json marshalling release: %s", err) - } - - return &Release{ - Config: config, - Output: ReleaseOutput{ - Status: mapReleaseStatus(rel.Info.Status), - Release: string(releaseJSON), - }, - }, nil -} - -func (p *Client) Delete(config *ReleaseConfig) error { - actionConfig, err := p.getActionConfiguration(config.Namespace) - if err != nil { - return errors.ErrInternal.WithMsgf("error while getting action configuration : %s", err) - } - - uninstall := action.NewUninstall(actionConfig) - run, err := uninstall.Run(config.Name) - if run != nil && run.Release.Info.Status == release.StatusUninstalled || run.Release.Info.Status == release.StatusUninstalling { - return nil - } else { - return errors.ErrInternal.WithMsgf("unable to uninstall release %s", err) - } -} - -func (*Client) chartPathOptions(config *ReleaseConfig) (*action.ChartPathOptions, string) { - repositoryURL, chartName := resolveChartName(config.Repository, strings.TrimSpace(config.Chart)) - - version := getVersion(config.Version) - - return &action.ChartPathOptions{ - RepoURL: repositoryURL, - Version: version, - }, chartName -} - -func resolveChartName(repository, name string) (string, string) { - _, err := url.ParseRequestURI(repository) - if err == nil { - return repository, name - } - - if !strings.Contains(name, "/") && repository != "" { - 
name = fmt.Sprintf("%s/%s", repository, name) - } - - return "", name -} - -func getVersion(version string) string { - if version == "" { - return ">0.0.0-0" - } - return strings.TrimSpace(version) -} - -func (p *Client) getChart(name string, cpo *action.ChartPathOptions) (*chart.Chart, error) { - // TODO: Add a lock as Load function blows up if accessed concurrently - - path, err := cpo.LocateChart(name, p.cliSettings) - if err != nil { - return nil, err - } - - c, err := loader.Load(path) - if err != nil { - return nil, err - } - - return c, nil -} - -func (p *Client) resourceReleaseExists(name string, namespace string) (bool, error) { - c, err := p.getActionConfiguration(namespace) - if err != nil { - return false, err - } - - _, err = p.getRelease(c, name) - - if err == nil { - return true, nil - } - - if errors.Is(err, ErrReleaseNotFound) { - return false, nil - } - - return false, err -} - -func (*Client) getRelease(cfg *action.Configuration, name string) (*release.Release, error) { - // TODO: Add provider level lock to make sure no other operation is changing this release - - get := action.NewGet(cfg) - res, err := get.Run(name) - if err != nil { - if strings.Contains(err.Error(), "release: not found") { - return nil, ErrReleaseNotFound - } - return nil, err - } - return res, nil -} - -func mapReleaseStatus(status release.Status) Status { - switch status { - case "unknown": - return StatusUnknown - case "deployed": - return StatusSuccess - default: - return StatusFailed - } -} diff --git a/pkg/helm/release_test.go b/pkg/helm/release_test.go deleted file mode 100644 index ac91569e..00000000 --- a/pkg/helm/release_test.go +++ /dev/null @@ -1,125 +0,0 @@ -//go:build integration -// +build integration - -package helm - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "math/rand" - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -var jsonValues = ` -{ - "firehose": { - "image": { - "tag": "0.1.1" - } - } -} -` - -var updatedJsonValues = ` -{ - "firehose": { - "image": { - "tag": "0.1.2" - } - } -} -` - -func jsonMarshal(data interface{}) string { - j, _ := json.MarshalIndent(data, "", " ") - return string(j) -} - -func jsonUnmarshal(data string) map[string]interface{} { - ret := map[string]interface{}{} - _ = json.Unmarshal([]byte(data), &ret) - return ret -} - -func getClient() *Client { - envKubeAPIServer := os.Getenv("TEST_K8S_API_SERVER") - envKubeSAToken := os.Getenv("TEST_K8S_SA_TOKEN") - clientConfig := DefaultClientConfig() - clientConfig.Kubernetes.Host = envKubeAPIServer - clientConfig.Kubernetes.Insecure = true - tokenBytes, _ := base64.StdEncoding.DecodeString(envKubeSAToken) - clientConfig.Kubernetes.Token = string(tokenBytes) - return NewClient(clientConfig) -} - -func TestReleaseCreate(t *testing.T) { - client := getClient() - - releaseName := fmt.Sprintf("test-entropy-helm-client-create-%d", rand.Int()) - - releaseConfig := DefaultReleaseConfig() - releaseConfig.Name = releaseName - releaseConfig.Repository = "https://odpf.github.io/charts/" - releaseConfig.Chart = "firehose" - releaseConfig.Version = "0.1.1" - releaseConfig.Values = jsonUnmarshal(jsonValues) - releaseConfig.WaitForJobs = true - rel, err := client.Create(releaseConfig) - - assert.Nil(t, err) - assert.NotNil(t, rel) - - _ = client.Delete(releaseConfig) -} - -func TestReleaseUpdate(t *testing.T) { - client := getClient() - - releaseName := fmt.Sprintf("test-entropy-helm-client-update-%d", rand.Int()) - - releaseConfig := DefaultReleaseConfig() - releaseConfig.Name = 
releaseName - releaseConfig.Repository = "https://odpf.github.io/charts/" - releaseConfig.Chart = "firehose" - releaseConfig.Version = "0.1.1" - releaseConfig.Values = jsonUnmarshal(jsonValues) - releaseConfig.WaitForJobs = true - rel, err := client.Create(releaseConfig) - - assert.Nil(t, err) - assert.NotNil(t, rel) - - releaseConfig.Values = jsonUnmarshal(updatedJsonValues) - rel2, err := client.Update(releaseConfig) - - assert.Nil(t, err) - assert.NotNil(t, rel2) - - _ = client.Delete(releaseConfig) -} - -func TestReleaseDelete(t *testing.T) { - client := getClient() - - releaseName := fmt.Sprintf("test-entropy-helm-client-delete-%d", rand.Int()) - - releaseConfig := DefaultReleaseConfig() - releaseConfig.Name = releaseName - releaseConfig.Repository = "https://odpf.github.io/charts/" - releaseConfig.Chart = "firehose" - releaseConfig.Version = "0.1.1" - releaseConfig.Values = jsonUnmarshal(jsonValues) - releaseConfig.WaitForJobs = true - rel, err := client.Create(releaseConfig) - - assert.Nil(t, err) - assert.NotNil(t, rel) - - err = client.Delete(releaseConfig) - - assert.Nil(t, err) -} diff --git a/pkg/helm/status.go b/pkg/helm/status.go deleted file mode 100644 index d980e451..00000000 --- a/pkg/helm/status.go +++ /dev/null @@ -1,11 +0,0 @@ -package helm - -type Status string - -const ( - StatusUnknown Status = "unknown" - StatusSuccess Status = "success" - StatusFailed Status = "failed" -) - -func (x Status) String() string { return string(x) } diff --git a/pkg/kafka/consumer_reset.go b/pkg/kafka/consumer_reset.go new file mode 100644 index 00000000..67677399 --- /dev/null +++ b/pkg/kafka/consumer_reset.go @@ -0,0 +1,111 @@ +package kafka + +import ( + "context" + "encoding/json" + "strings" + + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube" +) + +const ( + kafkaImage = "bitnami/kafka:2.0.0" + retries = 6 +) + +const ( + resetLatest = "latest" + resetEarliest = "earliest" + resetDatetime = "datetime" +) + +type ResetV2Params struct { + To string `json:"to"` + Datetime string `json:"datetime"` +} + +type ResetParams struct { + To string `json:"to"` +} + +// DoReset executes a kubernetes job with kafka-consumer-group.sh installed to +// reset offset policy for the given consumer id on all topics. +func DoReset(ctx context.Context, jobCluster *kube.Client, kubeNamespace, kafkaBrokers, kafkaConsumerID, kafkaResetValue, resetJobName string) error { + suffix := "-firehose" + resetJobName = strings.TrimSuffix(resetJobName, suffix) + resetJobName += "-reset" + + _, err := jobCluster.RunJob(ctx, kubeNamespace, + resetJobName, + kafkaImage, + prepCommand(kafkaBrokers, kafkaConsumerID, kafkaResetValue), + retries, + true, + ) + return err +} + +// ParseResetV2Params parses the given JSON data as reset parameters value and +// returns the actual reset value to be used with DoReset(). +func ParseResetV2Params(bytes json.RawMessage) (string, error) { + var params ResetV2Params + if err := json.Unmarshal(bytes, ¶ms); err != nil { + return "", errors.ErrInvalid. + WithMsgf("invalid reset params"). + WithCausef(err.Error()) + } + + resetValue := strings.ToLower(params.To) + if params.To == resetDatetime { + resetValue = params.Datetime + } else if resetValue != resetLatest && resetValue != resetEarliest { + return "", errors.ErrInvalid. 
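// Illustrative usage sketch (not part of this patch): the broker address,
// consumer group, namespace, and reset-job name below are placeholders, and
// the kube.Client is assumed to come from the new kube.NewClient(ctx, config).

package main

import (
	"context"
	"encoding/json"
	"log"

	"github.com/goto/entropy/pkg/kafka"
	"github.com/goto/entropy/pkg/kube"
)

func main() {
	ctx := context.Background()

	conf := kube.DefaultClientConfig()
	conf.Host = "https://k8s.example.com"
	conf.Token = "<service-account-token>"
	conf.Insecure = true

	cluster, err := kube.NewClient(ctx, conf)
	if err != nil {
		log.Fatal(err)
	}

	// Translate action params into a concrete reset value
	// ("earliest", "latest", or a datetime string).
	resetValue, err := kafka.ParseResetV2Params(json.RawMessage(`{"to": "earliest"}`))
	if err != nil {
		log.Fatal(err)
	}

	// Runs a one-off kubernetes job that invokes kafka-consumer-groups.sh
	// with --reset-offsets --execute --all-topics for the consumer group.
	if err := kafka.DoReset(ctx, cluster, "firehose-ns", "broker-1:9092",
		"example-consumer-group", resetValue, "example-firehose"); err != nil {
		log.Fatal(err)
	}
}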
+ WithMsgf("reset_value must be one of %v", []string{resetEarliest, resetLatest, resetDatetime}) + } + + return resetValue, nil +} + +// ParseResetParams parses the given JSON data as reset parameters value and +// returns the actual reset value to be used with DoReset(). +func ParseResetParams(bytes json.RawMessage) (string, error) { + var params ResetParams + if err := json.Unmarshal(bytes, ¶ms); err != nil { + return "", errors.ErrInvalid. + WithMsgf("invalid reset params"). + WithCausef(err.Error()) + } + + resetValue := strings.ToLower(params.To) + if resetValue != resetLatest && resetValue != resetEarliest { + return "", errors.ErrInvalid. + WithMsgf("reset_value must be one of %v", []string{resetEarliest, resetLatest}) + } + + return resetValue, nil +} + +func prepCommand(brokers, consumerID, kafkaResetValue string) []string { + args := []string{ + "kafka-consumer-groups.sh", + "--bootstrap-server", brokers, + "--group", consumerID, + "--reset-offsets", + "--execute", + "--all-topics", + } + + switch kafkaResetValue { + case resetLatest: + args = append(args, "--to-latest") + + case resetEarliest: + args = append(args, "--to-earliest") + + default: + args = append(args, "--to-datetime", kafkaResetValue) + } + + return args +} diff --git a/pkg/kube/client.go b/pkg/kube/client.go index a41fd01c..3c4e166a 100644 --- a/pkg/kube/client.go +++ b/pkg/kube/client.go @@ -14,15 +14,20 @@ import ( "github.com/mitchellh/mapstructure" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" typedbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" "k8s.io/client-go/rest" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/kube/job" ) const ( @@ -51,6 +56,14 @@ type LogChunk struct { type Pod struct { Name string `json:"name"` Containers []string `json:"containers"` + Status string `json:"status"` +} + +type FlinkDeploymentStatus struct { + State string `json:"state"` + JMDeployStatus string `json:"jm_deploy_status"` + JobStatus string `json:"job_status"` + Reconciliation string `json:"reconciliation"` } type LogOptions struct { @@ -64,6 +77,16 @@ type LogOptions struct { TailLines string `mapstructure:"tail_lines"` } +type Deployment struct { + Name string `json:"name"` + Paused bool `json:"paused"` + Replicas int `json:"replicas"` + ReadyReplicas int `json:"ready_replicas"` + AvailableReplicas int `json:"available_replicas"` + UnavailableReplicas int `json:"unavailable_replicas"` + Conditions []map[string]string `json:"conditions"` +} + func (l LogOptions) getPodListOptions() (metav1.ListOptions, error) { labelSelector := labels.NewSelector() fieldSelector := fields.Everything() @@ -125,11 +148,19 @@ func DefaultClientConfig() Config { return defaultProviderConfig } -func NewClient(config Config) *Client { - return &Client{ - restConfig: *config.RESTConfig(), - streamingConfig: *config.StreamingConfig(), +func NewClient(ctx context.Context, config Config) (*Client, error) { + restConfig, err := config.RESTConfig(ctx) + if err != nil { + return nil, err } + streamingConfig, err := config.StreamingConfig(ctx) + if err != nil { + return 
nil, err + } + return &Client{ + restConfig: *restConfig, + streamingConfig: *streamingConfig, + }, nil } func (c Client) StreamLogs(ctx context.Context, namespace string, filter map[string]string) (<-chan LogChunk, error) { @@ -143,10 +174,10 @@ func (c Client) StreamLogs(ctx context.Context, namespace string, filter map[str return c.streamFromPods(ctx, namespace, logOptions) } -func (c Client) RunJob(ctx context.Context, namespace, name string, image string, cmd []string, retries int32) error { +func (c Client) RunJob(ctx context.Context, namespace, name string, image string, cmd []string, retries int32, wait bool) (*batchv1.Job, error) { clientSet, err := kubernetes.NewForConfig(&c.restConfig) if err != nil { - return ErrJobCreationFailed.WithCausef(err.Error()) + return nil, ErrJobCreationFailed.WithCausef(err.Error()) } jobs := clientSet.BatchV1().Jobs(namespace) @@ -164,9 +195,8 @@ func (c Client) RunJob(ctx context.Context, namespace, name string, image string Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: name, - Image: image, - + Name: name, + Image: image, Command: cmd, }, }, @@ -177,12 +207,14 @@ func (c Client) RunJob(ctx context.Context, namespace, name string, image string }, } - _, err = jobs.Create(ctx, jobSpec, metav1.CreateOptions{}) + job, err := jobs.Create(ctx, jobSpec, metav1.CreateOptions{}) if err != nil { - return ErrJobCreationFailed.WithCausef(err.Error()) + return nil, ErrJobCreationFailed.WithCausef(err.Error()) } - - return waitForJob(ctx, name, jobs) + if !wait { + return job, nil + } + return job, waitForJob(ctx, name, jobs) } func waitForJob(ctx context.Context, jobName string, jobs typedbatchv1.JobInterface) error { @@ -263,7 +295,15 @@ func (c Client) streamFromPods(ctx context.Context, namespace string, logOptions return logCh, nil } -func (c Client) GetPodDetails(ctx context.Context, namespace string, labelSelectors map[string]string) ([]Pod, error) { +func (c Client) GetJobProcessor(j *job.Job) (*job.Processor, error) { + clientSet, err := kubernetes.NewForConfig(&c.restConfig) + if err != nil { + return nil, err + } + return job.NewProcessor(j, clientSet.BatchV1().Jobs(j.Namespace)), nil +} + +func (c Client) GetPodDetails(ctx context.Context, namespace string, labelSelectors map[string]string, allow func(pod corev1.Pod) bool) ([]Pod, error) { var podDetails []Pod var selectors []string var labelSelector string @@ -287,8 +327,12 @@ func (c Client) GetPodDetails(ctx context.Context, namespace string, labelSelect } for _, pod := range pods.Items { + if !allow(pod) { + continue + } podDetail := Pod{ - Name: pod.Name, + Name: pod.Name, + Status: string(pod.Status.Phase), } for _, container := range pod.Spec.Containers { @@ -300,6 +344,29 @@ func (c Client) GetPodDetails(ctx context.Context, namespace string, labelSelect return podDetails, nil } +func (c Client) GetCRDDetails(ctx context.Context, namespace string, name string) (*unstructured.Unstructured, error) { + // Initialize the dynamic client + dynamicClient, err := dynamic.NewForConfig(&c.restConfig) + if err != nil { + return nil, fmt.Errorf("failed to create dynamic client: %v", err) + } + + // Define the GVR (GroupVersionResource) for the FlinkDeployment CRD + gvr := schema.GroupVersionResource{ + Group: "flink.apache.org", + Version: "v1beta1", + Resource: "flinkdeployments", + } + + // Fetch the FlinkDeployment CRD details + flinkDeployment, err := dynamicClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, 
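// Illustrative sketch of the new allow callback on GetPodDetails (not part of
// this patch): the namespace and label selector are placeholders, and cluster
// is assumed to come from kube.NewClient.

package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/goto/entropy/pkg/kube"
)

func listRunningPods(ctx context.Context, cluster *kube.Client) error {
	pods, err := cluster.GetPodDetails(ctx, "firehose-ns",
		map[string]string{"app": "example-firehose"},
		func(p corev1.Pod) bool {
			// The allow callback filters the listing; here only running pods are kept.
			return p.Status.Phase == corev1.PodRunning
		},
	)
	if err != nil {
		return err
	}
	for _, p := range pods {
		fmt.Printf("%s (%s): containers=%v\n", p.Name, p.Status, p.Containers)
	}
	return nil
}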
fmt.Errorf("failed to get FlinkDeployment: %v", err) + } + + return flinkDeployment, nil +} + func streamContainerLogs(ctx context.Context, ns, podName string, logCh chan<- LogChunk, clientSet *kubernetes.Clientset, podLogOpts corev1.PodLogOptions, ) error { @@ -334,3 +401,47 @@ func streamContainerLogs(ctx context.Context, ns, podName string, logCh chan<- L } } } + +func (c Client) GetDeploymentDetails(ctx context.Context, namespace string, name string) (Deployment, error) { + clientSet, err := kubernetes.NewForConfig(&c.restConfig) + if err != nil { + return Deployment{}, err + } + + deployment, err := clientSet.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if k8s_errors.IsNotFound(err) { + return Deployment{}, nil + } + + if err != nil { + return Deployment{}, err + } + + d := Deployment{ + Name: deployment.Name, + Paused: deployment.Spec.Paused, + Replicas: int(deployment.Status.Replicas), + ReadyReplicas: int(deployment.Status.ReadyReplicas), + AvailableReplicas: int(deployment.Status.AvailableReplicas), + UnavailableReplicas: int(deployment.Status.UnavailableReplicas), + } + + if deployment.Status.Conditions != nil { + d.Conditions = make([]map[string]string, 0, len(deployment.Status.Conditions)) + for _, condition := range deployment.Status.Conditions { + if condition.Status == corev1.ConditionUnknown || condition.Status == corev1.ConditionFalse { + continue + } + + condMap := map[string]string{ + "type": string(condition.Type), + "status": string(condition.Status), + "reason": condition.Reason, + "message": condition.Message, + } + d.Conditions = append(d.Conditions, condMap) + } + } + + return d, nil +} diff --git a/pkg/kube/client_test.go b/pkg/kube/client_test.go index 104c32cf..8eb92586 100644 --- a/pkg/kube/client_test.go +++ b/pkg/kube/client_test.go @@ -8,8 +8,9 @@ import ( "os" "testing" - "github.com/odpf/entropy/pkg/errors" "github.com/stretchr/testify/assert" + + "github.com/goto/entropy/pkg/errors" ) var ( @@ -83,7 +84,7 @@ func TestGetStreamingLogs(t *testing.T) { config.ClientKey = tt.ClientKey config.ClientCertificate = tt.ClientCert - client := NewClient(config) + client := NewClient(context.Background(), config) ctx := new(context.Context) _, err := client.StreamLogs(*ctx, tt.Namespace, filter) diff --git a/pkg/kube/config.go b/pkg/kube/config.go index 8d679c0a..13b0cdee 100644 --- a/pkg/kube/config.go +++ b/pkg/kube/config.go @@ -1,18 +1,31 @@ package kube import ( + "context" "time" + "golang.org/x/oauth2/google" + "google.golang.org/api/container/v1" "k8s.io/client-go/rest" + + "github.com/goto/entropy/pkg/errors" +) + +const ( + providerTypeGKE = "gke" ) +type HelmConfig struct { + MaxHistory int `json:"max_history" default:"256"` +} + type Config struct { // Host - The hostname (in form of URI) of Kubernetes master. Host string `json:"host"` Timeout time.Duration `json:"timeout" default:"100ms"` - // Token - Token to authenticate a service account + // Token - Token to authenticate with static oauth2 access token Token string `json:"token"` // Insecure - Whether server should be accessed without verifying the TLS certificate. @@ -26,28 +39,94 @@ type Config struct { // ClusterCACertificate - PEM-encoded root certificates bundle for TLS authentication. ClusterCACertificate string `json:"cluster_ca_certificate"` + + // ProviderType indicates which provider that hos k8s: gke, eks, etc... 
+ // If it is `gke`, entropy will fetch auth from the default source + // left it empty if token or client key will be used + ProviderType string `json:"provider_type"` + + // Namespace defines where the resources that depend on this kube resource deployed to + // namespace is optional, if it is being defined, it will force all resources that depend + // on this kube resource to be deployed to the defined namespace + Namespace string `json:"namespace"` + + HelmConfig HelmConfig `json:"helm_config"` + + // Maximum burst for throttle. + Burst int `json:"burst" default:"10"` + + // QPS indicates the maximum QPS to the master from this client. + QPS float32 `json:"qps" default:"5"` } -func (conf Config) RESTConfig() *rest.Config { +func (conf *Config) RESTConfig(ctx context.Context) (*rest.Config, error) { rc := &rest.Config{ Host: conf.Host, Timeout: conf.Timeout, TLSClientConfig: rest.TLSClientConfig{ + Insecure: conf.Insecure, CAData: []byte(conf.ClusterCACertificate), KeyData: []byte(conf.ClientKey), CertData: []byte(conf.ClientCertificate), }, } - if conf.Token != "" { + if conf.ProviderType != "" { + switch conf.ProviderType { + case providerTypeGKE: + ts, err := google.DefaultTokenSource(ctx, container.CloudPlatformScope) + if err != nil { + return nil, errors.ErrInvalid.WithMsgf("%s: can't fetch credentials from service account json", conf.ProviderType).WithCausef(err.Error()) + } + oauth2Token, err := ts.Token() + if err != nil { + return nil, errors.ErrInternal.WithMsgf("%s: can't get token from token source", conf.ProviderType).WithCausef(err.Error()) + } + rc.BearerToken = oauth2Token.AccessToken + conf.Token = oauth2Token.AccessToken + default: + return nil, errors.ErrInternal.WithMsgf("%s: unsupported provider type", conf.ProviderType) + } + } else if conf.Token != "" { rc.BearerToken = conf.Token } - return rc + rc.Burst = conf.Burst + rc.QPS = conf.QPS + + return rc, nil } -func (conf Config) StreamingConfig() *rest.Config { - rc := conf.RESTConfig() +func (conf *Config) StreamingConfig(ctx context.Context) (*rest.Config, error) { + rc, err := conf.RESTConfig(ctx) + if err != nil { + return nil, err + } rc.Timeout = 0 - return rc + return rc, nil +} + +func (conf *Config) Sanitise() error { + if conf.Host == "" { + return errors.ErrInvalid.WithMsgf("host must be set") + } + + if conf.Timeout == 0 { + conf.Timeout = 1 * time.Second + } + + if conf.ProviderType == "" { + if conf.Token == "" { + if conf.ClientKey == "" || conf.ClientCertificate == "" { + return errors.ErrInvalid. 
+ WithMsgf("client_key and client_certificate must be set when token and service account is not set") + } + } + + if !conf.Insecure && len(conf.ClusterCACertificate) == 0 { + return errors.ErrInvalid.WithMsgf("cluster_ca_certificate must be set when insecure=false") + } + } + + return nil } diff --git a/pkg/kube/container/container.go b/pkg/kube/container/container.go new file mode 100644 index 00000000..4ef6857f --- /dev/null +++ b/pkg/kube/container/container.go @@ -0,0 +1,112 @@ +package container + +import ( + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +type Container struct { + Image string + Name string + EnvConfigMaps []string + Command []string + EnvMap map[string]string + Args []string + ImagePullPolicy string + VolumeMounts []VolumeMount + Requests map[string]string + Limits map[string]string + PreStopCmd []string + PostStartCmd []string +} + +type VolumeMount struct { + Name string + MountPath string +} + +func (c Container) Template() corev1.Container { + var env []corev1.EnvVar + for k, v := range c.EnvMap { + env = append(env, corev1.EnvVar{ + Name: k, + Value: v, + }) + } + var envFrom []corev1.EnvFromSource + for _, configMap := range c.EnvConfigMaps { + envFrom = append(envFrom, corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMap}, + }, + }) + } + var mounts []corev1.VolumeMount + for _, v := range c.VolumeMounts { + mounts = append(mounts, corev1.VolumeMount{ + Name: v.Name, + MountPath: v.MountPath, + }) + } + + // Shared directory for all the containers + mounts = append(mounts, corev1.VolumeMount{ + Name: "shared-data", + MountPath: "/shared", + }) + + var lifecycle corev1.Lifecycle + if len(c.PreStopCmd) > 0 { + lifecycle.PreStop = &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{Command: c.PreStopCmd}, + } + } + if len(c.PostStartCmd) > 0 { + lifecycle.PostStart = &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{Command: c.PostStartCmd}, + } + } + + return corev1.Container{ + Name: c.Name, + Image: c.Image, + Command: c.Command, + Args: c.Args, + EnvFrom: envFrom, + Env: env, + Resources: c.parseResources(), + VolumeMounts: mounts, + Lifecycle: &lifecycle, + ImagePullPolicy: corev1.PullPolicy(c.ImagePullPolicy), + } +} + +func (c Container) parseResources() corev1.ResourceRequirements { + cpuLimits, err := resource.ParseQuantity(c.Limits["cpu"]) + if err != nil { + zap.L().Error(err.Error()) + } + memLimits, err := resource.ParseQuantity(c.Limits["memory"]) + if err != nil { + zap.L().Error(err.Error()) + } + cpuRequests, err := resource.ParseQuantity(c.Requests["cpu"]) + if err != nil { + zap.L().Error(err.Error()) + } + memRequests, err := resource.ParseQuantity(c.Requests["memory"]) + if err != nil { + zap.L().Error(err.Error()) + } + return corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: cpuLimits, + corev1.ResourceMemory: memLimits, + }, + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: cpuRequests, + corev1.ResourceMemory: memRequests, + }, + } +} diff --git a/pkg/kube/container/container_test.go b/pkg/kube/container/container_test.go new file mode 100644 index 00000000..05e21735 --- /dev/null +++ b/pkg/kube/container/container_test.go @@ -0,0 +1,91 @@ +package container + +import ( + "reflect" + "testing" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestContainer_Template(t *testing.T) { + quantity, _ := 
resource.ParseQuantity("100") + type fields struct { + Image string + Name string + EnvConfigMaps []string + Command []string + EnvMap map[string]string + Args []string + ImagePullPolicy string + VolumeMounts []VolumeMount + Requests map[string]string + Limits map[string]string + } + tests := []struct { + name string + fields fields + want v1.Container + }{ + {name: "Container Create", fields: fields{ + Image: "image:v1.0", + Name: "container-name", + EnvConfigMaps: []string{"cm1"}, + Command: []string{"cmd1", "cmd2"}, + EnvMap: map[string]string{"a": "b"}, + Args: nil, + ImagePullPolicy: "Never", + VolumeMounts: []VolumeMount{{ + Name: "v1", + MountPath: "/tmp/v1", + }}, + Requests: map[string]string{"cpu": "100", "memory": "100"}, + Limits: map[string]string{"cpu": "100", "memory": "100"}, + }, want: v1.Container{ + Name: "container-name", + Image: "image:v1.0", + Command: []string{"cmd1", "cmd2"}, + EnvFrom: []v1.EnvFromSource{{ + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: v1.LocalObjectReference{Name: "cm1"}, + }, + }}, + Env: []v1.EnvVar{{Name: "a", Value: "b"}}, + Resources: v1.ResourceRequirements{ + Limits: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: quantity, + v1.ResourceMemory: quantity, + }, + Requests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: quantity, + v1.ResourceMemory: quantity, + }, + }, + Lifecycle: &v1.Lifecycle{}, + VolumeMounts: []v1.VolumeMount{ + {Name: "v1", MountPath: "/tmp/v1"}, + {Name: "shared-data", MountPath: "/shared"}, + }, + ImagePullPolicy: "Never", + }}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := Container{ + Image: tt.fields.Image, + Name: tt.fields.Name, + EnvConfigMaps: tt.fields.EnvConfigMaps, + Command: tt.fields.Command, + EnvMap: tt.fields.EnvMap, + Args: tt.fields.Args, + ImagePullPolicy: tt.fields.ImagePullPolicy, + VolumeMounts: tt.fields.VolumeMounts, + Requests: tt.fields.Requests, + Limits: tt.fields.Limits, + } + if got := c.Template(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Template() = %v\n, want %v\n", got, tt.want) + } + }) + } +} diff --git a/pkg/kube/job/job.go b/pkg/kube/job/job.go new file mode 100644 index 00000000..2028f54f --- /dev/null +++ b/pkg/kube/job/job.go @@ -0,0 +1,46 @@ +package job + +import ( + "strings" + + v1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/goto/entropy/pkg/kube/pod" +) + +const WatchTimeout int64 = 60 + +type Job struct { + Pod *pod.Pod + Name string + Namespace string + Labels map[string]string + Parallelism *int32 + BackOffList *int32 + TTLSeconds *int32 +} + +func (j *Job) Template() *v1.Job { + return &v1.Job{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: j.Name, + Labels: j.Labels, + Namespace: j.Namespace, + }, + Spec: v1.JobSpec{ + Template: j.Pod.Template(), + Parallelism: j.Parallelism, + BackoffLimit: j.BackOffList, + TTLSecondsAfterFinished: j.TTLSeconds, + }, + Status: v1.JobStatus{}, + } +} + +func (j *Job) WatchOptions() metav1.ListOptions { + timout := WatchTimeout + label := strings.Join([]string{"name", j.Name}, "=") + return metav1.ListOptions{TimeoutSeconds: &timout, LabelSelector: label} +} diff --git a/pkg/kube/job/job_test.go b/pkg/kube/job/job_test.go new file mode 100644 index 00000000..c3bda326 --- /dev/null +++ b/pkg/kube/job/job_test.go @@ -0,0 +1,76 @@ +package job + +import ( + "reflect" + "testing" + + v12 "k8s.io/api/batch/v1" + v13 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + 
"github.com/goto/entropy/pkg/kube/pod" +) + +func TestJob_Template(t *testing.T) { + var constant int32 = 1 + type fields struct { + Pod *pod.Pod + Name string + Namespace string + Labels map[string]string + Parallelism *int32 + BackOffList *int32 + } + tests := []struct { + name string + fields fields + want *v12.Job + }{ + {name: "Job template", fields: fields{ + Pod: &pod.Pod{ + Name: "pod-name", + }, + Name: "job-name", + Namespace: "default", + Labels: nil, + Parallelism: &constant, + BackOffList: &constant, + }, want: &v12.Job{ + ObjectMeta: v1.ObjectMeta{ + Name: "job-name", + Namespace: "default", + }, + Spec: v12.JobSpec{ + BackoffLimit: &constant, + Parallelism: &constant, + Template: v13.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{Name: "pod-name"}, + Spec: v13.PodSpec{ + Containers: nil, + Volumes: []v13.Volume{{ + Name: "shared-data", + VolumeSource: v13.VolumeSource{EmptyDir: &v13.EmptyDirVolumeSource{}}, + }}, + RestartPolicy: v13.RestartPolicyNever, + }, + }, + }, + Status: v12.JobStatus{}, + }}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + j := &Job{ + Pod: tt.fields.Pod, + Name: tt.fields.Name, + Namespace: tt.fields.Namespace, + Labels: tt.fields.Labels, + Parallelism: tt.fields.Parallelism, + BackOffList: tt.fields.BackOffList, + } + if got := j.Template(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Template() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/kube/job/processor.go b/pkg/kube/job/processor.go new file mode 100644 index 00000000..87d8e747 --- /dev/null +++ b/pkg/kube/job/processor.go @@ -0,0 +1,117 @@ +package job + +import ( + "context" + "fmt" + + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + v1 "k8s.io/client-go/kubernetes/typed/batch/v1" + + "github.com/goto/entropy/pkg/errors" +) + +const ( + Invalid StatusType = iota + Success + Failed + Running + Ready + Finished +) + +var deletionPolicy = metav1.DeletePropagationForeground + +type ( + StatusType int + Processor struct { + Job *Job + Client v1.JobInterface + watch watch.Interface + JobDeleteOptions metav1.DeleteOptions + } +) + +type Status struct { + Status StatusType + Err error +} + +func (jp *Processor) GetWatch() watch.Interface { + return jp.watch +} + +func NewProcessor(job *Job, client v1.JobInterface) *Processor { + deleteOptions := metav1.DeleteOptions{PropagationPolicy: &deletionPolicy} + return &Processor{Job: job, Client: client, JobDeleteOptions: deleteOptions} +} + +func (jp *Processor) SubmitJob() error { + _, err := jp.Client.Create(context.Background(), jp.Job.Template(), metav1.CreateOptions{}) + return err +} + +func (jp *Processor) UpdateJob(suspend bool) error { + job, err := jp.Client.Get(context.Background(), jp.Job.Name, metav1.GetOptions{}) + if err != nil { + return err + } + job.Spec.Suspend = &suspend + _, err = jp.Client.Update(context.Background(), job, metav1.UpdateOptions{}) + return err +} + +func (jp *Processor) CreateWatch() error { + w, err := jp.Client.Watch(context.Background(), jp.Job.WatchOptions()) + jp.watch = w + return err +} + +func (jp *Processor) GetStatus() Status { + job, err := jp.Client.Get(context.Background(), jp.Job.Name, metav1.GetOptions{}) + status := Status{} + if err != nil { + status.Status = Invalid + status.Err = err + return status + } + if *job.Status.Ready >= 1 { + status.Status = Ready + return status + } + if job.Status.Active >= 1 { + status.Status = Running + return status + } + if job.Status.Succeeded 
>= 1 { + status.Status = Success + return status + } else { + zap.L().Error(fmt.Sprintf("JOB FAILED %v\n", job)) + status.Status = Failed + return status + } +} + +func (jp *Processor) DeleteJob() error { + return jp.Client.Delete(context.Background(), jp.Job.Name, jp.JobDeleteOptions) +} + +func (jp *Processor) WatchCompletion(exitChan chan Status) { + if jp.GetWatch() == nil { + exitChan <- Status{ + Status: Invalid, + Err: errors.New("watcher Object is not initialized"), + } + return + } + + for { + _, more := <-jp.GetWatch().ResultChan() + if !more { + break + } + } + exitChan <- Status{Status: Finished} +} diff --git a/pkg/kube/pod/pod.go b/pkg/kube/pod/pod.go new file mode 100644 index 00000000..a998057e --- /dev/null +++ b/pkg/kube/pod/pod.go @@ -0,0 +1,42 @@ +package pod + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/goto/entropy/pkg/kube/container" + "github.com/goto/entropy/pkg/kube/volume" +) + +type Pod struct { + Name string + Containers []container.Container + Volumes []volume.Volume + Labels map[string]string +} + +func (p Pod) Template() corev1.PodTemplateSpec { + var containers []corev1.Container + for _, c := range p.Containers { + containers = append(containers, c.Template()) + } + var volumes []corev1.Volume + for _, v := range p.Volumes { + volumes = append(volumes, v.GetPodVolume()) + } + volumes = append(volumes, corev1.Volume{ + Name: "shared-data", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }) + return corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.Name, + Labels: p.Labels, + }, + Spec: corev1.PodSpec{ + Containers: containers, + Volumes: volumes, + RestartPolicy: corev1.RestartPolicyNever, + }, + } +} diff --git a/pkg/kube/volume/Volume.go b/pkg/kube/volume/Volume.go new file mode 100644 index 00000000..896a29a1 --- /dev/null +++ b/pkg/kube/volume/Volume.go @@ -0,0 +1,36 @@ +package volume + +import ( + v1 "k8s.io/api/core/v1" +) + +const ( + Secret Source = iota + ConfigMap +) + +type Source int + +type Volume struct { + Kind Source + Name string + SourceName string +} + +func (v Volume) GetPodVolume() v1.Volume { + var vSource v1.VolumeSource + switch v.Kind { + case Secret: + vSource.Secret = &v1.SecretVolumeSource{ + SecretName: v.SourceName, + } + case ConfigMap: + vSource.ConfigMap = &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{Name: v.SourceName}, + } + } + return v1.Volume{ + Name: v.Name, + VolumeSource: vSource, + } +} diff --git a/pkg/kube/volume/Volume_test.go b/pkg/kube/volume/Volume_test.go new file mode 100644 index 00000000..682f6f94 --- /dev/null +++ b/pkg/kube/volume/Volume_test.go @@ -0,0 +1,44 @@ +package volume + +import ( + "reflect" + "testing" + + v1 "k8s.io/api/core/v1" +) + +func TestVolume_GetPodVolume(t *testing.T) { + type fields struct { + Kind Source + Name string + SourceName string + } + tests := []struct { + name string + fields fields + want v1.Volume + }{ + { + name: "Get Volume", fields: fields{ + Kind: ConfigMap, Name: "volume1", SourceName: "confMap1", + }, want: v1.Volume{ + Name: "volume1", + VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{Name: "confMap1"}, + }}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := Volume{ + Kind: tt.fields.Kind, + Name: tt.fields.Name, + SourceName: tt.fields.SourceName, + } + if got := v.GetPodVolume(); 
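// Illustrative sketch (not part of this patch) of how these builder types
// compose into a runnable kubernetes Job via the job.Processor: the names,
// image, and namespace are placeholders, and cluster is assumed to come from
// kube.NewClient.

package example

import (
	"log"

	"github.com/goto/entropy/pkg/kube"
	"github.com/goto/entropy/pkg/kube/container"
	"github.com/goto/entropy/pkg/kube/job"
	"github.com/goto/entropy/pkg/kube/pod"
	"github.com/goto/entropy/pkg/kube/volume"
)

func runOnce(cluster *kube.Client) error {
	one := int32(1)

	j := &job.Job{
		Name:      "example-job",
		Namespace: "default",
		// WatchOptions selects jobs by the "name" label, so label the job accordingly.
		Labels:      map[string]string{"name": "example-job"},
		Parallelism: &one,
		BackOffList: &one,
		Pod: &pod.Pod{
			Name: "example-pod",
			Containers: []container.Container{{
				Name:            "runner",
				Image:           "busybox:1.36",
				Command:         []string{"sh", "-c", "echo done"},
				ImagePullPolicy: "IfNotPresent",
				Requests:        map[string]string{"cpu": "100m", "memory": "64Mi"},
				Limits:          map[string]string{"cpu": "200m", "memory": "128Mi"},
				VolumeMounts:    []container.VolumeMount{{Name: "conf", MountPath: "/etc/conf"}},
			}},
			Volumes: []volume.Volume{{Kind: volume.ConfigMap, Name: "conf", SourceName: "example-configmap"}},
		},
	}

	proc, err := cluster.GetJobProcessor(j)
	if err != nil {
		return err
	}
	if err := proc.SubmitJob(); err != nil {
		return err
	}
	if err := proc.CreateWatch(); err != nil {
		return err
	}

	done := make(chan job.Status)
	go proc.WatchCompletion(done)
	st := <-done
	log.Printf("job finished with status %v", st.Status)
	return st.Err
}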
!reflect.DeepEqual(got, tt.want) { + t.Errorf("GetPodVolume() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index ccc6861c..19454d35 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -9,11 +9,16 @@ type LogConfig struct { Level string `mapstructure:"level" default:"info"` } -func New(config *LogConfig) (*zap.Logger, error) { +func Setup(config *LogConfig) error { defaultConfig := zap.NewProductionConfig() defaultConfig.Level = zap.NewAtomicLevelAt(getZapLogLevelFromString(config.Level)) logger, err := zap.NewProductionConfig().Build() - return logger, err + if err != nil { + return err + } + // Setting up global Logger. This can be accessed by zap.L() + zap.ReplaceGlobals(logger) + return nil } func getZapLogLevelFromString(level string) zapcore.Level { diff --git a/pkg/telemetry/opencensus.go b/pkg/telemetry/opencensus.go deleted file mode 100644 index f4b79120..00000000 --- a/pkg/telemetry/opencensus.go +++ /dev/null @@ -1,168 +0,0 @@ -package telemetry - -import ( - "context" - "net/http" - - "contrib.go.opencensus.io/exporter/ocagent" - "contrib.go.opencensus.io/exporter/prometheus" - "github.com/newrelic/newrelic-opencensus-exporter-go/nrcensus" - "go.opencensus.io/plugin/ocgrpc" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/runmetrics" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "go.opencensus.io/zpages" -) - -func setupOpenCensus(ctx context.Context, mux *http.ServeMux, cfg Config) error { - trace.ApplyConfig(trace.Config{ - DefaultSampler: trace.ProbabilitySampler(cfg.SamplingFraction), - }) - - if cfg.EnableMemory || cfg.EnableCPU { - opts := runmetrics.RunMetricOptions{ - EnableCPU: cfg.EnableCPU, - EnableMemory: cfg.EnableMemory, - } - if err := runmetrics.Enable(opts); err != nil { - return err - } - } - - if err := setupViews(); err != nil { - return err - } - - if cfg.EnableNewrelic { - exporter, err := nrcensus.NewExporter(cfg.ServiceName, cfg.NewRelicAPIKey) - if err != nil { - return err - } - view.RegisterExporter(exporter) - trace.RegisterExporter(exporter) - } - - if cfg.EnableOtelAgent { - ocExporter, err := ocagent.NewExporter( - ocagent.WithServiceName(cfg.ServiceName), - ocagent.WithInsecure(), - ocagent.WithAddress(cfg.OpenTelAgentAddr), - ) - if err != nil { - return err - } - go func() { - <-ctx.Done() - _ = ocExporter.Stop() - }() - trace.RegisterExporter(ocExporter) - view.RegisterExporter(ocExporter) - } - - pe, err := prometheus.NewExporter(prometheus.Options{ - Namespace: cfg.ServiceName, - }) - if err != nil { - return err - } - view.RegisterExporter(pe) - mux.Handle("/metrics", pe) - - zpages.Handle(mux, "/debug") - return nil -} - -func setupViews() error { - if err := setupClientViews(); err != nil { - return err - } - - if err := setupServerViews(); err != nil { - return err - } - - return nil -} - -func setupServerViews() error { - serverViewTags := []tag.Key{ - ochttp.KeyServerRoute, - ochttp.Method, - } - - return view.Register( - &view.View{ - Name: "opencensus.io/http/server/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ochttp.ServerRequestBytes, - Aggregation: ochttp.DefaultSizeDistribution, - TagKeys: serverViewTags, - }, - &view.View{ - Name: "opencensus.io/http/server/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ochttp.ServerResponseBytes, - Aggregation: ochttp.DefaultSizeDistribution, - TagKeys: serverViewTags, - }, - 
&view.View{ - Name: "opencensus.io/http/server/latency", - Description: "Latency distribution of HTTP requests", - Measure: ochttp.ServerLatency, - Aggregation: ochttp.DefaultLatencyDistribution, - TagKeys: serverViewTags, - }, - &view.View{ - Name: "opencensus.io/http/server/request_count_by_method", - Description: "Server request count by HTTP method", - Measure: ochttp.ServerRequestCount, - Aggregation: view.Count(), - TagKeys: serverViewTags, - }, - &view.View{ - Name: "opencensus.io/http/server/response_count_by_status_code", - Description: "Server response count by status code", - TagKeys: append(serverViewTags, ochttp.StatusCode), - Measure: ochttp.ServerLatency, - Aggregation: view.Count(), - }, - ) -} - -func setupClientViews() error { - if err := view.Register(ocgrpc.DefaultClientViews...); err != nil { - return err - } - - clientViewTags := []tag.Key{ - ochttp.KeyClientMethod, - ochttp.KeyClientStatus, - ochttp.KeyClientHost, - } - - return view.Register( - &view.View{ - Name: "opencensus.io/http/client/roundtrip_latency", - Measure: ochttp.ClientRoundtripLatency, - Aggregation: ochttp.DefaultLatencyDistribution, - Description: "End-to-end latency, by HTTP method and response status", - TagKeys: clientViewTags, - }, - &view.View{ - Name: "opencensus.io/http/client/sent_bytes", - Measure: ochttp.ClientSentBytes, - Aggregation: ochttp.DefaultSizeDistribution, - Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", - TagKeys: clientViewTags, - }, - &view.View{ - Name: "opencensus.io/http/client/received_bytes", - Measure: ochttp.ClientReceivedBytes, - Aggregation: ochttp.DefaultSizeDistribution, - Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", - TagKeys: clientViewTags, - }, - ) -} diff --git a/pkg/telemetry/opentelemetry.go b/pkg/telemetry/opentelemetry.go new file mode 100644 index 00000000..6705bc37 --- /dev/null +++ b/pkg/telemetry/opentelemetry.go @@ -0,0 +1,90 @@ +package telemetry + +import ( + "context" + "net/http" + "time" + + "go.opentelemetry.io/contrib/instrumentation/runtime" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/metric" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.21.0" +) + +func setupOpenTelemetry(ctx context.Context, mux *http.ServeMux, cfg Config) error { + var options []sdkmetric.Option + // Create resource with service information + res, err := resource.New(ctx, + resource.WithAttributes( + semconv.ServiceName(cfg.ServiceName), + ), + ) + if err != nil { + return err + } + + // Setup metrics if enabled + if cfg.EnableOtelAgent { + opt, err := setupOTELMetrics(ctx, cfg) + if err != nil { + return err + } + options = append(options, opt...) 
+ } + + meterProvider := sdkmetric.NewMeterProvider( + append([]sdkmetric.Option{sdkmetric.WithResource(res)}, options...)..., + ) + + otel.SetMeterProvider(meterProvider) + + go func() { + <-ctx.Done() + if err := meterProvider.Shutdown(context.Background()); err != nil { + otel.Handle(err) + } + }() + + return nil +} + +func setupOTELMetrics(ctx context.Context, cfg Config) ([]sdkmetric.Option, error) { + var sdkMetricOptions []sdkmetric.Option + var periodicReaderOptions []sdkmetric.PeriodicReaderOption + + promExporter, err := prometheus.New(prometheus.WithNamespace(cfg.ServiceName)) + if err != nil { + return nil, err + } + + otlpExporter, err := otlpmetricgrpc.New(ctx, + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithEndpoint(cfg.OpenTelAgentAddr), + ) + if err != nil { + return nil, err + } + + periodicReaderOptions = append(periodicReaderOptions, sdkmetric.WithInterval(10*time.Second)) + + if cfg.EnableRuntimeMetrics { + periodicReaderOptions = append(periodicReaderOptions, sdkmetric.WithProducer(runtime.NewProducer())) + } + + sdkMetricOptions = append(sdkMetricOptions, sdkmetric.WithReader(promExporter)) + sdkMetricOptions = append(sdkMetricOptions, sdkmetric.WithReader(sdkmetric.NewPeriodicReader( + otlpExporter, + periodicReaderOptions..., + ))) + + return sdkMetricOptions, nil + +} + +func GetMeter(name string) metric.Meter { + return otel.GetMeterProvider().Meter(name) +} diff --git a/pkg/telemetry/telemetry.go b/pkg/telemetry/telemetry.go index e0ed24e2..95b790d1 100644 --- a/pkg/telemetry/telemetry.go +++ b/pkg/telemetry/telemetry.go @@ -10,18 +10,15 @@ import ( type Config struct { // Debug sets the bind address for pprof & zpages server. - Debug string `mapstructure:"debug_addr"` + Debug string `mapstructure:"debug_addr" default:"localhost:8090"` - // OpenCensus trace & metrics configurations. - EnableCPU bool `mapstructure:"enable_cpu"` - EnableMemory bool `mapstructure:"enable_memory"` - SamplingFraction float64 `mapstructure:"sampling_fraction"` + // OpenTelemetry trace & metrics configurations. + EnableRuntimeMetrics bool `mapstructure:"enable_runtime_metrics"` - // OpenCensus exporter configurations. + // OpenTelemetry exporter configurations. ServiceName string `mapstructure:"service_name"` - // NewRelic exporter. - EnableNewrelic bool `mapstructure:"enable_newrelic"` + // NewRelic configs. NewRelicAPIKey string `mapstructure:"newrelic_api_key"` // OpenTelemetry Agent exporter. @@ -29,23 +26,26 @@ type Config struct { OpenTelAgentAddr string `mapstructure:"otel_agent_addr"` } -// Init initialises OpenCensus based async-telemetry processes and +// Init initialises OpenTelemetry based async-telemetry processes and // returns (i.e., it does not block). 
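// Illustrative sketch (not part of this patch): the debug address, service
// name, and OTLP agent address are placeholders. Init registers a global
// meter provider, so instruments can be created anywhere via GetMeter.

package main

import (
	"context"

	"github.com/goto/entropy/pkg/telemetry"
)

func main() {
	ctx := context.Background()

	telemetry.Init(ctx, telemetry.Config{
		Debug:                "localhost:8090",
		ServiceName:          "entropy",
		EnableOtelAgent:      true,
		OpenTelAgentAddr:     "localhost:4317",
		EnableRuntimeMetrics: true,
	})

	meter := telemetry.GetMeter("example")
	counter, err := meter.Int64Counter("example_requests_total")
	if err != nil {
		panic(err)
	}
	counter.Add(ctx, 1)
}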
-func Init(ctx context.Context, cfg Config, lg *zap.Logger) { +func Init(ctx context.Context, cfg Config) { mux := http.NewServeMux() mux.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine")) mux.Handle("/debug/pprof/heap", pprof.Handler("heap")) mux.Handle("/debug/pprof/threadcreate", pprof.Handler("threadcreate")) mux.Handle("/debug/pprof/block", pprof.Handler("block")) - if err := setupOpenCensus(ctx, mux, cfg); err != nil { - lg.Error("failed to setup OpenCensus", zap.Error(err)) + if err := setupOpenTelemetry(ctx, mux, cfg); err != nil { + zap.L().Error("failed to setup OpenTelemetry", zap.Error(err)) } + zap.L().Info("telemetry server started", zap.String("debug_addr", cfg.Debug)) + if cfg.Debug != "" { go func() { + //nolint:gosec if err := http.ListenAndServe(cfg.Debug, mux); err != nil { - lg.Error("debug server exited due to error", zap.Error(err)) + zap.L().Error("debug server exited due to error", zap.Error(err)) } }() } diff --git a/pkg/validator/validator.go b/pkg/validator/validator.go new file mode 100644 index 00000000..635dd3b4 --- /dev/null +++ b/pkg/validator/validator.go @@ -0,0 +1,56 @@ +package validator + +import ( + "fmt" + "strings" + + "github.com/go-playground/validator/v10" + "github.com/xeipuuv/gojsonschema" + + "github.com/goto/entropy/pkg/errors" +) + +// FromJSONSchema returns a validator that can validate using a JSON schema. +func FromJSONSchema(schemaVal []byte) func(jsonVal []byte) error { + schema, schemaErr := gojsonschema.NewSchema(gojsonschema.NewBytesLoader(schemaVal)) + + return func(jsonVal []byte) error { + if schemaErr != nil { + return errors.ErrInternal.WithCausef(schemaErr.Error()) + } + + result, err := schema.Validate(gojsonschema.NewBytesLoader(jsonVal)) + if err != nil { + return errors.ErrInternal.WithCausef(err.Error()) + } else if !result.Valid() { + var errorStrings []string + for _, resultErr := range result.Errors() { + errorStrings = append(errorStrings, resultErr.String()) + } + errorString := strings.Join(errorStrings, "\n") + return errors.ErrInvalid.WithMsgf(errorString) + } + + return nil + } +} + +// TaggedStruct validates the given struct-value using go-validate package +// based on 'validate' tags. 
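// Illustrative sketch (not part of this patch): the JSON schema and struct
// below are invented purely to show both helpers.

package main

import (
	"fmt"

	"github.com/goto/entropy/pkg/validator"
)

func main() {
	validate := validator.FromJSONSchema([]byte(`{
		"type": "object",
		"required": ["replicas"],
		"properties": {"replicas": {"type": "integer", "minimum": 1}}
	}`))

	// Prints an errors.ErrInvalid describing the schema violations.
	fmt.Println(validate([]byte(`{"replicas": 0}`)))

	type spec struct {
		Name     string `validate:"required"`
		Replicas int    `validate:"gte=1"`
	}
	// Prints an errors.ErrInvalid describing the failing validations.
	fmt.Println(validator.TaggedStruct(spec{}))
}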
+func TaggedStruct(structVal any) error { + err := validator.New().Struct(structVal) + if err != nil { + var fields []string + + var valErr *validator.ValidationErrors + if errors.As(err, &valErr) { + for _, fieldError := range *valErr { + fields = append(fields, fmt.Sprintf("%s: %s", fieldError.Field(), fieldError.Tag())) + } + return errors.ErrInvalid.WithMsgf("invalid values for fields").WithCausef(strings.Join(fields, ", ")) + } else { + return errors.ErrInvalid.WithCausef(err.Error()) + } + } + return nil +} diff --git a/pkg/version/version.go b/pkg/version/version.go index 5ae42c77..6e08dc28 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -5,9 +5,10 @@ import ( "runtime" "time" - commonv1 "go.buf.build/odpf/gw/odpf/proton/odpf/common/v1" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/timestamppb" + + commonv1 "github.com/goto/entropy/proto/gotocompany/common/v1" ) var ( @@ -33,6 +34,6 @@ func GetVersionAndBuildInfo() *commonv1.Version { } func Print() error { - _, err := fmt.Println(protojson.Format(GetVersionAndBuildInfo())) //nolint + _, err := fmt.Println(protojson.Format(GetVersionAndBuildInfo())) return err } diff --git a/pkg/worker/example/main.go b/pkg/worker/example/main.go index ec3c97e9..fada2325 100644 --- a/pkg/worker/example/main.go +++ b/pkg/worker/example/main.go @@ -9,9 +9,9 @@ import ( "go.uber.org/zap" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/worker" - "github.com/odpf/entropy/pkg/worker/pgq" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/worker" + "github.com/goto/entropy/pkg/worker/pgq" ) var ( diff --git a/pkg/worker/job_test.go b/pkg/worker/job_test.go index 70ecf616..f47e36d2 100644 --- a/pkg/worker/job_test.go +++ b/pkg/worker/job_test.go @@ -8,8 +8,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/worker" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/worker" ) func TestJob_Attempt(t *testing.T) { diff --git a/pkg/worker/mocks/job_queue.go b/pkg/worker/mocks/job_queue.go index 19e4bb79..9fbf25b5 100644 --- a/pkg/worker/mocks/job_queue.go +++ b/pkg/worker/mocks/job_queue.go @@ -1,11 +1,11 @@ -// Code generated by mockery v2.10.4. DO NOT EDIT. +// Code generated by mockery v2.42.1. DO NOT EDIT. 
package mocks import ( context "context" - worker "github.com/odpf/entropy/pkg/worker" + worker "github.com/goto/entropy/pkg/worker" mock "github.com/stretchr/testify/mock" ) @@ -26,6 +26,10 @@ func (_m *JobQueue) EXPECT() *JobQueue_Expecter { func (_m *JobQueue) Dequeue(ctx context.Context, kinds []string, fn worker.DequeueFn) error { ret := _m.Called(ctx, kinds, fn) + if len(ret) == 0 { + panic("no return value specified for Dequeue") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, []string, worker.DequeueFn) error); ok { r0 = rf(ctx, kinds, fn) @@ -42,9 +46,9 @@ type JobQueue_Dequeue_Call struct { } // Dequeue is a helper method to define mock.On call -// - ctx context.Context -// - kinds []string -// - fn worker.DequeueFn +// - ctx context.Context +// - kinds []string +// - fn worker.DequeueFn func (_e *JobQueue_Expecter) Dequeue(ctx interface{}, kinds interface{}, fn interface{}) *JobQueue_Dequeue_Call { return &JobQueue_Dequeue_Call{Call: _e.mock.On("Dequeue", ctx, kinds, fn)} } @@ -61,6 +65,11 @@ func (_c *JobQueue_Dequeue_Call) Return(_a0 error) *JobQueue_Dequeue_Call { return _c } +func (_c *JobQueue_Dequeue_Call) RunAndReturn(run func(context.Context, []string, worker.DequeueFn) error) *JobQueue_Dequeue_Call { + _c.Call.Return(run) + return _c +} + // Enqueue provides a mock function with given fields: ctx, jobs func (_m *JobQueue) Enqueue(ctx context.Context, jobs ...worker.Job) error { _va := make([]interface{}, len(jobs)) @@ -72,6 +81,10 @@ func (_m *JobQueue) Enqueue(ctx context.Context, jobs ...worker.Job) error { _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for Enqueue") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, ...worker.Job) error); ok { r0 = rf(ctx, jobs...) @@ -88,8 +101,8 @@ type JobQueue_Enqueue_Call struct { } // Enqueue is a helper method to define mock.On call -// - ctx context.Context -// - jobs ...worker.Job +// - ctx context.Context +// - jobs ...worker.Job func (_e *JobQueue_Expecter) Enqueue(ctx interface{}, jobs ...interface{}) *JobQueue_Enqueue_Call { return &JobQueue_Enqueue_Call{Call: _e.mock.On("Enqueue", append([]interface{}{ctx}, jobs...)...)} @@ -112,3 +125,22 @@ func (_c *JobQueue_Enqueue_Call) Return(_a0 error) *JobQueue_Enqueue_Call { _c.Call.Return(_a0) return _c } + +func (_c *JobQueue_Enqueue_Call) RunAndReturn(run func(context.Context, ...worker.Job) error) *JobQueue_Enqueue_Call { + _c.Call.Return(run) + return _c +} + +// NewJobQueue creates a new instance of JobQueue. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
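// Illustrative test sketch (not part of this patch) showing the expecter
// helpers generated by mockery v2.42.1; the job ID and kind are made up.

package worker_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/goto/entropy/pkg/worker"
	"github.com/goto/entropy/pkg/worker/mocks"
)

func TestEnqueueExample(t *testing.T) {
	// Expectations are asserted automatically through t.Cleanup.
	q := mocks.NewJobQueue(t)

	q.EXPECT().
		Enqueue(mock.Anything, mock.Anything).
		Return(nil)

	if err := q.Enqueue(context.Background(), worker.Job{ID: "job-1", Kind: "example"}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}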
+func NewJobQueue(t interface { + mock.TestingT + Cleanup(func()) +}) *JobQueue { + mock := &JobQueue{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/worker/pgq/pgq.go b/pkg/worker/pgq/pgq.go index 6113b184..754ef1a6 100644 --- a/pkg/worker/pgq/pgq.go +++ b/pkg/worker/pgq/pgq.go @@ -12,8 +12,8 @@ import ( sq "github.com/Masterminds/squirrel" "github.com/lib/pq" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/worker" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/worker" ) const ( diff --git a/pkg/worker/pgq/pgq_utils.go b/pkg/worker/pgq/pgq_utils.go index 224ec817..fdd48ae4 100644 --- a/pkg/worker/pgq/pgq_utils.go +++ b/pkg/worker/pgq/pgq_utils.go @@ -7,7 +7,7 @@ import ( sq "github.com/Masterminds/squirrel" - "github.com/odpf/entropy/pkg/worker" + "github.com/goto/entropy/pkg/worker" ) type txnFn func(ctx context.Context, tx *sql.Tx) error diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go index c5822957..6a2d1d7f 100644 --- a/pkg/worker/worker.go +++ b/pkg/worker/worker.go @@ -8,7 +8,7 @@ import ( "go.uber.org/zap" - "github.com/odpf/entropy/pkg/errors" + "github.com/goto/entropy/pkg/errors" ) // Worker provides asynchronous job processing using a job-queue. @@ -127,6 +127,7 @@ func (w *Worker) handleJob(ctx context.Context, job Job) (*Job, error) { w.logger.Info("got a pending job", zap.String("job_id", job.ID), zap.String("job_kind", job.Kind), + zap.String("status", job.Status), ) fn, exists := w.handlers[job.Kind] @@ -140,6 +141,13 @@ func (w *Worker) handleJob(ctx context.Context, job Job) (*Job, error) { } job.Attempt(ctx, time.Now(), fn) + + w.logger.Info("job attempted", + zap.String("job_id", job.ID), + zap.String("job_kind", job.Kind), + zap.String("status", job.Status), + zap.String("last_error", job.LastError), + ) return &job, nil } diff --git a/pkg/worker/worker_test.go b/pkg/worker/worker_test.go index 3dcd4bbe..6392a0fa 100644 --- a/pkg/worker/worker_test.go +++ b/pkg/worker/worker_test.go @@ -9,9 +9,9 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/odpf/entropy/pkg/errors" - "github.com/odpf/entropy/pkg/worker" - "github.com/odpf/entropy/pkg/worker/mocks" + "github.com/goto/entropy/pkg/errors" + "github.com/goto/entropy/pkg/worker" + "github.com/goto/entropy/pkg/worker/mocks" ) func Test_New(t *testing.T) { diff --git a/proto/entropy.swagger.yaml b/proto/entropy.swagger.yaml new file mode 100644 index 00000000..af46685d --- /dev/null +++ b/proto/entropy.swagger.yaml @@ -0,0 +1,617 @@ +swagger: "2.0" +info: + title: gotocompany/common/v1/service.proto + version: 0.1.0 +tags: + - name: CommonService + - name: ModuleService + - name: ResourceService +schemes: + - http +consumes: + - application/json +produces: + - application/json +paths: + /v1beta1/modules: + get: + operationId: ModuleService_ListModules + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/ListModulesResponse' + default: + description: An unexpected error response. 
+ schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: project + in: query + required: false + type: string + tags: + - ModuleService + post: + operationId: ModuleService_CreateModule + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/CreateModuleResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: module + in: body + required: true + schema: + $ref: '#/definitions/Module' + tags: + - ModuleService + /v1beta1/modules/{urn}: + get: + operationId: ModuleService_GetModule + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/GetModuleResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: urn + in: path + required: true + type: string + tags: + - ModuleService + delete: + operationId: ModuleService_DeleteModule + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/DeleteModuleResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: urn + in: path + required: true + type: string + tags: + - ModuleService + patch: + operationId: ModuleService_UpdateModule + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/UpdateModuleResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: urn + in: path + required: true + type: string + - name: body + in: body + required: true + schema: + type: object + properties: + configs: {} + tags: + - ModuleService + /v1beta1/resources: + get: + operationId: ResourceService_ListResources + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/ListResourcesResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: project + in: query + required: false + type: string + - name: kind + in: query + required: false + type: string + - name: with_spec_configs + description: |- + this toggle if set, will return spec configs as well. + it's default value is false. + in: query + required: false + type: boolean + - name: page_size + in: query + required: false + type: integer + format: int32 + - name: page_num + in: query + required: false + type: integer + format: int32 + tags: + - ResourceService + post: + operationId: ResourceService_CreateResource + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/CreateResourceResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: resource + in: body + required: true + schema: + $ref: '#/definitions/Resource' + - name: dry_run + in: query + required: false + type: boolean + tags: + - ResourceService + /v1beta1/resources/{urn}: + get: + operationId: ResourceService_GetResource + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/GetResourceResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: urn + in: path + required: true + type: string + tags: + - ResourceService + delete: + operationId: ResourceService_DeleteResource + responses: + "200": + description: A successful response. 
+ schema: + $ref: '#/definitions/DeleteResourceResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: urn + in: path + required: true + type: string + tags: + - ResourceService + patch: + operationId: ResourceService_UpdateResource + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/UpdateResourceResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: urn + in: path + required: true + type: string + - name: body + in: body + required: true + schema: + type: object + properties: + new_spec: + $ref: '#/definitions/ResourceSpec' + labels: + type: object + additionalProperties: + type: string + dry_run: + type: boolean + tags: + - ResourceService + /v1beta1/resources/{urn}/actions/{action}: + post: + operationId: ResourceService_ApplyAction + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/ApplyActionResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: urn + in: path + required: true + type: string + - name: action + in: path + required: true + type: string + - name: params + in: body + required: true + schema: {} + - name: dry_run + in: query + required: false + type: boolean + tags: + - ResourceService + /v1beta1/resources/{urn}/logs: + get: + operationId: ResourceService_GetLog + responses: + "200": + description: A successful response.(streaming responses) + schema: + type: object + properties: + result: + $ref: '#/definitions/GetLogResponse' + error: + $ref: '#/definitions/rpc.Status' + title: Stream result of GetLogResponse + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: urn + in: path + required: true + type: string + tags: + - ResourceService + /v1beta1/resources/{urn}/revisions: + get: + operationId: ResourceService_GetResourceRevisions + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/GetResourceRevisionsResponse' + default: + description: An unexpected error response. + schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: urn + in: path + required: true + type: string + tags: + - ResourceService + /v1/version: + post: + operationId: CommonService_GetVersion + responses: + "200": + description: A successful response. + schema: + $ref: '#/definitions/GetVersionResponse' + default: + description: An unexpected error response. 
+ schema: + $ref: '#/definitions/rpc.Status' + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/GetVersionRequest' + tags: + - CommonService +definitions: + Any: + type: object + properties: + '@type': + type: string + additionalProperties: {} + ApplyActionResponse: + type: object + properties: + resource: + $ref: '#/definitions/Resource' + CreateModuleResponse: + type: object + properties: + module: + $ref: '#/definitions/Module' + CreateResourceResponse: + type: object + properties: + resource: + $ref: '#/definitions/Resource' + DeleteModuleResponse: + type: object + DeleteResourceResponse: + type: object + GetLogResponse: + type: object + properties: + chunk: + $ref: '#/definitions/LogChunk' + GetModuleResponse: + type: object + properties: + module: + $ref: '#/definitions/Module' + GetResourceResponse: + type: object + properties: + resource: + $ref: '#/definitions/Resource' + GetResourceRevisionsResponse: + type: object + properties: + revisions: + type: array + items: + type: object + $ref: '#/definitions/ResourceRevision' + GetVersionRequest: + type: object + properties: + client: + $ref: '#/definitions/Version' + GetVersionResponse: + type: object + properties: + server: + $ref: '#/definitions/Version' + ListModulesResponse: + type: object + properties: + modules: + type: array + items: + type: object + $ref: '#/definitions/Module' + ListResourcesResponse: + type: object + properties: + resources: + type: array + items: + type: object + $ref: '#/definitions/Resource' + count: + type: integer + format: int32 + ListString: + type: object + properties: + values: + type: array + items: + type: string + LogChunk: + type: object + properties: + data: + type: string + format: byte + labels: + type: object + additionalProperties: + type: string + LogOptions: + type: object + properties: + filters: + type: object + additionalProperties: + $ref: '#/definitions/ListString' + Module: + type: object + properties: + urn: + type: string + name: + type: string + project: + type: string + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + configs: {} + NullValue: + type: string + enum: + - NULL_VALUE + default: NULL_VALUE + description: |- + `NullValue` is a singleton enumeration to represent the null value for the + `Value` type union. + + The JSON representation for `NullValue` is JSON `null`. + + - NULL_VALUE: Null value. + Resource: + type: object + properties: + urn: + type: string + kind: + type: string + name: + type: string + project: + type: string + labels: + type: object + additionalProperties: + type: string + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + spec: + $ref: '#/definitions/ResourceSpec' + state: + $ref: '#/definitions/ResourceState' + created_by: + type: string + updated_by: + type: string + ResourceDependency: + type: object + properties: + key: + type: string + description: |- + Key should be as defined by the module being used for + the resource. + value: + type: string + description: Value should refer to an existing resource via URN. 
+ ResourceRevision: + type: object + properties: + id: + type: string + urn: + type: string + labels: + type: object + additionalProperties: + type: string + created_at: + type: string + format: date-time + spec: + $ref: '#/definitions/ResourceSpec' + reason: + type: string + created_by: + type: string + ResourceSpec: + type: object + properties: + configs: {} + dependencies: + type: array + items: + type: object + $ref: '#/definitions/ResourceDependency' + description: |- + dependencies can be used to refer to other existing resources + as dependency of this resource. + ResourceState: + type: object + properties: + status: + $ref: '#/definitions/ResourceState.Status' + output: {} + module_data: + type: string + format: byte + log_options: + $ref: '#/definitions/LogOptions' + sync_retries: + type: integer + format: int32 + description: |- + information about the ongoing sync process. + if status is ERROR / PENDING, this value can be used to understand + the issue. + sync_last_error: + type: string + next_sync_at: + type: string + format: date-time + ResourceState.Status: + type: string + enum: + - STATUS_UNSPECIFIED + - STATUS_PENDING + - STATUS_ERROR + - STATUS_DELETED + - STATUS_COMPLETED + default: STATUS_UNSPECIFIED + UpdateModuleResponse: + type: object + properties: + module: + $ref: '#/definitions/Module' + UpdateResourceResponse: + type: object + properties: + resource: + $ref: '#/definitions/Resource' + Version: + type: object + properties: + version: + type: string + commit: + type: string + build_time: + type: string + format: date-time + lang_version: + type: string + os: + type: string + architecture: + type: string + rpc.Status: + type: object + properties: + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + $ref: '#/definitions/Any' +externalDocs: + description: Common endpoints for all services diff --git a/proto/gotocompany/common/v1/service.pb.go b/proto/gotocompany/common/v1/service.pb.go new file mode 100644 index 00000000..1672caf2 --- /dev/null +++ b/proto/gotocompany/common/v1/service.pb.go @@ -0,0 +1,357 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: gotocompany/common/v1/service.proto + +package v1 + +import ( + _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options" + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Client *Version `protobuf:"bytes,1,opt,name=client,proto3" json:"client,omitempty"` +} + +func (x *GetVersionRequest) Reset() { + *x = GetVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_common_v1_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVersionRequest) ProtoMessage() {} + +func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_common_v1_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. +func (*GetVersionRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_common_v1_service_proto_rawDescGZIP(), []int{0} +} + +func (x *GetVersionRequest) GetClient() *Version { + if x != nil { + return x.Client + } + return nil +} + +type GetVersionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Server *Version `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"` +} + +func (x *GetVersionResponse) Reset() { + *x = GetVersionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_common_v1_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVersionResponse) ProtoMessage() {} + +func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_common_v1_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. 
+func (*GetVersionResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_common_v1_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetVersionResponse) GetServer() *Version { + if x != nil { + return x.Server + } + return nil +} + +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Commit string `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` + BuildTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=build_time,json=buildTime,proto3" json:"build_time,omitempty"` + LangVersion string `protobuf:"bytes,4,opt,name=lang_version,json=langVersion,proto3" json:"lang_version,omitempty"` + Os string `protobuf:"bytes,5,opt,name=os,proto3" json:"os,omitempty"` + Architecture string `protobuf:"bytes,6,opt,name=architecture,proto3" json:"architecture,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_common_v1_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_common_v1_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_gotocompany_common_v1_service_proto_rawDescGZIP(), []int{2} +} + +func (x *Version) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Version) GetCommit() string { + if x != nil { + return x.Commit + } + return "" +} + +func (x *Version) GetBuildTime() *timestamppb.Timestamp { + if x != nil { + return x.BuildTime + } + return nil +} + +func (x *Version) GetLangVersion() string { + if x != nil { + return x.LangVersion + } + return "" +} + +func (x *Version) GetOs() string { + if x != nil { + return x.Os + } + return "" +} + +func (x *Version) GetArchitecture() string { + if x != nil { + return x.Architecture + } + return "" +} + +var File_gotocompany_common_v1_service_proto protoreflect.FileDescriptor + +var file_gotocompany_common_v1_service_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, + 0x6e, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 
0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4b, 0x0a, 0x11, 0x47, + 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x36, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x22, 0x4c, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, + 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x06, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, 0xcd, 0x01, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x6c, 0x61, 0x6e, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6c, 0x61, 0x6e, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x6f, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, + 0x72, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, + 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x32, 0x8a, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x79, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x10, 0x3a, 0x01, 0x2a, 0x22, 0x0b, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x42, 0x8b, 0x01, 0x92, 0x41, 0x31, 0x12, 0x07, 0x32, 0x05, 0x30, 0x2e, 0x31, + 0x2e, 0x30, 0x2a, 0x01, 0x01, 0x72, 0x23, 0x0a, 0x21, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x20, + 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x6c, + 0x6c, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 
0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x6e, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x42, 0x12, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x20, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x74, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_gotocompany_common_v1_service_proto_rawDescOnce sync.Once + file_gotocompany_common_v1_service_proto_rawDescData = file_gotocompany_common_v1_service_proto_rawDesc +) + +func file_gotocompany_common_v1_service_proto_rawDescGZIP() []byte { + file_gotocompany_common_v1_service_proto_rawDescOnce.Do(func() { + file_gotocompany_common_v1_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_gotocompany_common_v1_service_proto_rawDescData) + }) + return file_gotocompany_common_v1_service_proto_rawDescData +} + +var file_gotocompany_common_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_gotocompany_common_v1_service_proto_goTypes = []interface{}{ + (*GetVersionRequest)(nil), // 0: gotocompany.common.v1.GetVersionRequest + (*GetVersionResponse)(nil), // 1: gotocompany.common.v1.GetVersionResponse + (*Version)(nil), // 2: gotocompany.common.v1.Version + (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp +} +var file_gotocompany_common_v1_service_proto_depIdxs = []int32{ + 2, // 0: gotocompany.common.v1.GetVersionRequest.client:type_name -> gotocompany.common.v1.Version + 2, // 1: gotocompany.common.v1.GetVersionResponse.server:type_name -> gotocompany.common.v1.Version + 3, // 2: gotocompany.common.v1.Version.build_time:type_name -> google.protobuf.Timestamp + 0, // 3: gotocompany.common.v1.CommonService.GetVersion:input_type -> gotocompany.common.v1.GetVersionRequest + 1, // 4: gotocompany.common.v1.CommonService.GetVersion:output_type -> gotocompany.common.v1.GetVersionResponse + 4, // [4:5] is the sub-list for method output_type + 3, // [3:4] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_gotocompany_common_v1_service_proto_init() } +func file_gotocompany_common_v1_service_proto_init() { + if File_gotocompany_common_v1_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_gotocompany_common_v1_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_common_v1_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVersionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_common_v1_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_gotocompany_common_v1_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_gotocompany_common_v1_service_proto_goTypes, + DependencyIndexes: file_gotocompany_common_v1_service_proto_depIdxs, + MessageInfos: file_gotocompany_common_v1_service_proto_msgTypes, + }.Build() + File_gotocompany_common_v1_service_proto = out.File + file_gotocompany_common_v1_service_proto_rawDesc = nil + file_gotocompany_common_v1_service_proto_goTypes = nil + file_gotocompany_common_v1_service_proto_depIdxs = nil +} diff --git a/proto/gotocompany/common/v1/service.pb.gw.go b/proto/gotocompany/common/v1/service.pb.gw.go new file mode 100644 index 00000000..43e57236 --- /dev/null +++ b/proto/gotocompany/common/v1/service.pb.gw.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: gotocompany/common/v1/service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_CommonService_GetVersion_0(ctx context.Context, marshaler runtime.Marshaler, client CommonServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetVersionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_CommonService_GetVersion_0(ctx context.Context, marshaler runtime.Marshaler, server CommonServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetVersionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetVersion(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterCommonServiceHandlerServer registers the http handlers for service CommonService to "mux". +// UnaryRPC :call CommonServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterCommonServiceHandlerFromEndpoint instead. 
+func RegisterCommonServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server CommonServiceServer) error { + + mux.Handle("POST", pattern_CommonService_GetVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.common.v1.CommonService/GetVersion", runtime.WithHTTPPathPattern("/v1/version")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_CommonService_GetVersion_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_CommonService_GetVersion_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterCommonServiceHandlerFromEndpoint is same as RegisterCommonServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterCommonServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterCommonServiceHandler(ctx, mux, conn) +} + +// RegisterCommonServiceHandler registers the http handlers for service CommonService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterCommonServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterCommonServiceHandlerClient(ctx, mux, NewCommonServiceClient(conn)) +} + +// RegisterCommonServiceHandlerClient registers the http handlers for service CommonService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "CommonServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "CommonServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "CommonServiceClient" to call the correct interceptors. 
+func RegisterCommonServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client CommonServiceClient) error { + + mux.Handle("POST", pattern_CommonService_GetVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.common.v1.CommonService/GetVersion", runtime.WithHTTPPathPattern("/v1/version")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_CommonService_GetVersion_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_CommonService_GetVersion_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_CommonService_GetVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "version"}, "")) +) + +var ( + forward_CommonService_GetVersion_0 = runtime.ForwardResponseMessage +) diff --git a/proto/gotocompany/common/v1/service.pb.validate.go b/proto/gotocompany/common/v1/service.pb.validate.go new file mode 100644 index 00000000..05cb7d8e --- /dev/null +++ b/proto/gotocompany/common/v1/service.pb.validate.go @@ -0,0 +1,435 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: gotocompany/common/v1/service.proto + +package v1 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on GetVersionRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GetVersionRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetVersionRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetVersionRequestMultiError, or nil if none found. 
+func (m *GetVersionRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetVersionRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetClient()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetVersionRequestValidationError{ + field: "Client", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetVersionRequestValidationError{ + field: "Client", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetClient()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetVersionRequestValidationError{ + field: "Client", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetVersionRequestMultiError(errors) + } + + return nil +} + +// GetVersionRequestMultiError is an error wrapping multiple validation errors +// returned by GetVersionRequest.ValidateAll() if the designated constraints +// aren't met. +type GetVersionRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetVersionRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetVersionRequestMultiError) AllErrors() []error { return m } + +// GetVersionRequestValidationError is the validation error returned by +// GetVersionRequest.Validate if the designated constraints aren't met. +type GetVersionRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetVersionRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetVersionRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetVersionRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetVersionRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetVersionRequestValidationError) ErrorName() string { + return "GetVersionRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e GetVersionRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetVersionRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetVersionRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetVersionRequestValidationError{} + +// Validate checks the field values on GetVersionResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetVersionResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetVersionResponse with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// GetVersionResponseMultiError, or nil if none found. +func (m *GetVersionResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetVersionResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetServer()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetVersionResponseValidationError{ + field: "Server", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetVersionResponseValidationError{ + field: "Server", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetServer()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetVersionResponseValidationError{ + field: "Server", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetVersionResponseMultiError(errors) + } + + return nil +} + +// GetVersionResponseMultiError is an error wrapping multiple validation errors +// returned by GetVersionResponse.ValidateAll() if the designated constraints +// aren't met. +type GetVersionResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetVersionResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetVersionResponseMultiError) AllErrors() []error { return m } + +// GetVersionResponseValidationError is the validation error returned by +// GetVersionResponse.Validate if the designated constraints aren't met. +type GetVersionResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetVersionResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetVersionResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetVersionResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetVersionResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetVersionResponseValidationError) ErrorName() string { + return "GetVersionResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e GetVersionResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetVersionResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetVersionResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetVersionResponseValidationError{} + +// Validate checks the field values on Version with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Version) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Version with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in VersionMultiError, or nil if none found. +func (m *Version) ValidateAll() error { + return m.validate(true) +} + +func (m *Version) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Version + + // no validation rules for Commit + + if all { + switch v := interface{}(m.GetBuildTime()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, VersionValidationError{ + field: "BuildTime", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, VersionValidationError{ + field: "BuildTime", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBuildTime()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return VersionValidationError{ + field: "BuildTime", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for LangVersion + + // no validation rules for Os + + // no validation rules for Architecture + + if len(errors) > 0 { + return VersionMultiError(errors) + } + + return nil +} + +// VersionMultiError is an error wrapping multiple validation errors returned +// by Version.ValidateAll() if the designated constraints aren't met. +type VersionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m VersionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m VersionMultiError) AllErrors() []error { return m } + +// VersionValidationError is the validation error returned by Version.Validate +// if the designated constraints aren't met. +type VersionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e VersionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e VersionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e VersionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e VersionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e VersionValidationError) ErrorName() string { return "VersionValidationError" } + +// Error satisfies the builtin error interface +func (e VersionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sVersion.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = VersionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = VersionValidationError{} diff --git a/proto/gotocompany/common/v1/service_grpc.pb.go b/proto/gotocompany/common/v1/service_grpc.pb.go new file mode 100644 index 00000000..ecad6c42 --- /dev/null +++ b/proto/gotocompany/common/v1/service_grpc.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: gotocompany/common/v1/service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + CommonService_GetVersion_FullMethodName = "/gotocompany.common.v1.CommonService/GetVersion" +) + +// CommonServiceClient is the client API for CommonService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CommonServiceClient interface { + GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) +} + +type commonServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCommonServiceClient(cc grpc.ClientConnInterface) CommonServiceClient { + return &commonServiceClient{cc} +} + +func (c *commonServiceClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) { + out := new(GetVersionResponse) + err := c.cc.Invoke(ctx, CommonService_GetVersion_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CommonServiceServer is the server API for CommonService service. +// All implementations must embed UnimplementedCommonServiceServer +// for forward compatibility +type CommonServiceServer interface { + GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) + mustEmbedUnimplementedCommonServiceServer() +} + +// UnimplementedCommonServiceServer must be embedded to have forward compatible implementations. +type UnimplementedCommonServiceServer struct { +} + +func (UnimplementedCommonServiceServer) GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented") +} +func (UnimplementedCommonServiceServer) mustEmbedUnimplementedCommonServiceServer() {} + +// UnsafeCommonServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CommonServiceServer will +// result in compilation errors. 
+type UnsafeCommonServiceServer interface { + mustEmbedUnimplementedCommonServiceServer() +} + +func RegisterCommonServiceServer(s grpc.ServiceRegistrar, srv CommonServiceServer) { + s.RegisterService(&CommonService_ServiceDesc, srv) +} + +func _CommonService_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CommonServiceServer).GetVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CommonService_GetVersion_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CommonServiceServer).GetVersion(ctx, req.(*GetVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// CommonService_ServiceDesc is the grpc.ServiceDesc for CommonService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CommonService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "gotocompany.common.v1.CommonService", + HandlerType: (*CommonServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetVersion", + Handler: _CommonService_GetVersion_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "gotocompany/common/v1/service.proto", +} diff --git a/proto/gotocompany/entropy/v1beta1/module.pb.go b/proto/gotocompany/entropy/v1beta1/module.pb.go new file mode 100644 index 00000000..dd6acd0f --- /dev/null +++ b/proto/gotocompany/entropy/v1beta1/module.pb.go @@ -0,0 +1,922 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: gotocompany/entropy/v1beta1/module.proto + +package entropyv1beta1 + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Module struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + Configs *structpb.Value `protobuf:"bytes,7,opt,name=configs,proto3" json:"configs,omitempty"` +} + +func (x *Module) Reset() { + *x = Module{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Module) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Module) ProtoMessage() {} + +func (x *Module) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Module.ProtoReflect.Descriptor instead. +func (*Module) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{0} +} + +func (x *Module) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +func (x *Module) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Module) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *Module) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Module) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Module) GetConfigs() *structpb.Value { + if x != nil { + return x.Configs + } + return nil +} + +type ListModulesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *ListModulesRequest) Reset() { + *x = ListModulesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListModulesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListModulesRequest) ProtoMessage() {} + +func (x *ListModulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListModulesRequest.ProtoReflect.Descriptor instead. 
+func (*ListModulesRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{1} +} + +func (x *ListModulesRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +type ListModulesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Modules []*Module `protobuf:"bytes,1,rep,name=modules,proto3" json:"modules,omitempty"` +} + +func (x *ListModulesResponse) Reset() { + *x = ListModulesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListModulesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListModulesResponse) ProtoMessage() {} + +func (x *ListModulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListModulesResponse.ProtoReflect.Descriptor instead. +func (*ListModulesResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{2} +} + +func (x *ListModulesResponse) GetModules() []*Module { + if x != nil { + return x.Modules + } + return nil +} + +type GetModuleRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` +} + +func (x *GetModuleRequest) Reset() { + *x = GetModuleRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetModuleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetModuleRequest) ProtoMessage() {} + +func (x *GetModuleRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetModuleRequest.ProtoReflect.Descriptor instead. 
+func (*GetModuleRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{3} +} + +func (x *GetModuleRequest) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +type GetModuleResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Module *Module `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` +} + +func (x *GetModuleResponse) Reset() { + *x = GetModuleResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetModuleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetModuleResponse) ProtoMessage() {} + +func (x *GetModuleResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetModuleResponse.ProtoReflect.Descriptor instead. +func (*GetModuleResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{4} +} + +func (x *GetModuleResponse) GetModule() *Module { + if x != nil { + return x.Module + } + return nil +} + +type CreateModuleRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Module *Module `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` +} + +func (x *CreateModuleRequest) Reset() { + *x = CreateModuleRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateModuleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateModuleRequest) ProtoMessage() {} + +func (x *CreateModuleRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateModuleRequest.ProtoReflect.Descriptor instead. 
+func (*CreateModuleRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{5} +} + +func (x *CreateModuleRequest) GetModule() *Module { + if x != nil { + return x.Module + } + return nil +} + +type CreateModuleResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Module *Module `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` +} + +func (x *CreateModuleResponse) Reset() { + *x = CreateModuleResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateModuleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateModuleResponse) ProtoMessage() {} + +func (x *CreateModuleResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateModuleResponse.ProtoReflect.Descriptor instead. +func (*CreateModuleResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateModuleResponse) GetModule() *Module { + if x != nil { + return x.Module + } + return nil +} + +type UpdateModuleRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` + Configs *structpb.Value `protobuf:"bytes,3,opt,name=configs,proto3" json:"configs,omitempty"` +} + +func (x *UpdateModuleRequest) Reset() { + *x = UpdateModuleRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateModuleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateModuleRequest) ProtoMessage() {} + +func (x *UpdateModuleRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateModuleRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateModuleRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateModuleRequest) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +func (x *UpdateModuleRequest) GetConfigs() *structpb.Value { + if x != nil { + return x.Configs + } + return nil +} + +type UpdateModuleResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Module *Module `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` +} + +func (x *UpdateModuleResponse) Reset() { + *x = UpdateModuleResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateModuleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateModuleResponse) ProtoMessage() {} + +func (x *UpdateModuleResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateModuleResponse.ProtoReflect.Descriptor instead. +func (*UpdateModuleResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateModuleResponse) GetModule() *Module { + if x != nil { + return x.Module + } + return nil +} + +type DeleteModuleRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` +} + +func (x *DeleteModuleRequest) Reset() { + *x = DeleteModuleRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteModuleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteModuleRequest) ProtoMessage() {} + +func (x *DeleteModuleRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteModuleRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteModuleRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{9} +} + +func (x *DeleteModuleRequest) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +type DeleteModuleResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteModuleResponse) Reset() { + *x = DeleteModuleResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteModuleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteModuleResponse) ProtoMessage() {} + +func (x *DeleteModuleResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_module_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteModuleResponse.ProtoReflect.Descriptor instead. +func (*DeleteModuleResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP(), []int{10} +} + +var File_gotocompany_entropy_v1beta1_module_proto protoreflect.FileDescriptor + +var file_gotocompany_entropy_v1beta1_module_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2f, 0x65, 0x6e, + 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x6d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x67, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6, 0x01, 0x0a, 0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0x2e, 0x0a, + 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x54, 0x0a, + 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, + 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x07, 0x6d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x73, 0x22, 0x24, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, + 0x0a, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, + 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x64, + 0x75, 0x6c, 0x65, 0x52, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x52, 0x0a, 0x13, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, + 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x22, + 0x53, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x06, 0x6d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0x22, 0x5f, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x30, 0x0a, + 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 
0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, + 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x52, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x27, 0x0a, 0x13, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x75, 0x72, 0x6e, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x6f, 0x64, + 0x75, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf0, 0x05, 0x0a, 0x0d, + 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x8a, 0x01, + 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2f, 0x2e, + 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, + 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, + 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x09, 0x47, + 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, + 0x16, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x73, 0x2f, 0x7b, 0x75, 0x72, 0x6e, 0x7d, 0x12, 0x95, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x6f, 0x64, + 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, + 0x6f, 0x64, 0x75, 0x6c, 
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x10, 0x2f, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x96, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, + 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x3a, 0x01, 0x2a, + 0x32, 0x16, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, + 0x65, 0x73, 0x2f, 0x7b, 0x75, 0x72, 0x6e, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, + 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x2a, 0x16, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x75, 0x72, 0x6e, 0x7d, 0x42, 0x75, + 0x0a, 0x26, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x12, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x74, 0x6f, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2f, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_gotocompany_entropy_v1beta1_module_proto_rawDescOnce sync.Once + file_gotocompany_entropy_v1beta1_module_proto_rawDescData = file_gotocompany_entropy_v1beta1_module_proto_rawDesc +) + +func file_gotocompany_entropy_v1beta1_module_proto_rawDescGZIP() []byte { + file_gotocompany_entropy_v1beta1_module_proto_rawDescOnce.Do(func() { + file_gotocompany_entropy_v1beta1_module_proto_rawDescData = protoimpl.X.CompressGZIP(file_gotocompany_entropy_v1beta1_module_proto_rawDescData) + }) + return file_gotocompany_entropy_v1beta1_module_proto_rawDescData +} + +var file_gotocompany_entropy_v1beta1_module_proto_msgTypes = 
make([]protoimpl.MessageInfo, 11) +var file_gotocompany_entropy_v1beta1_module_proto_goTypes = []interface{}{ + (*Module)(nil), // 0: gotocompany.entropy.v1beta1.Module + (*ListModulesRequest)(nil), // 1: gotocompany.entropy.v1beta1.ListModulesRequest + (*ListModulesResponse)(nil), // 2: gotocompany.entropy.v1beta1.ListModulesResponse + (*GetModuleRequest)(nil), // 3: gotocompany.entropy.v1beta1.GetModuleRequest + (*GetModuleResponse)(nil), // 4: gotocompany.entropy.v1beta1.GetModuleResponse + (*CreateModuleRequest)(nil), // 5: gotocompany.entropy.v1beta1.CreateModuleRequest + (*CreateModuleResponse)(nil), // 6: gotocompany.entropy.v1beta1.CreateModuleResponse + (*UpdateModuleRequest)(nil), // 7: gotocompany.entropy.v1beta1.UpdateModuleRequest + (*UpdateModuleResponse)(nil), // 8: gotocompany.entropy.v1beta1.UpdateModuleResponse + (*DeleteModuleRequest)(nil), // 9: gotocompany.entropy.v1beta1.DeleteModuleRequest + (*DeleteModuleResponse)(nil), // 10: gotocompany.entropy.v1beta1.DeleteModuleResponse + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*structpb.Value)(nil), // 12: google.protobuf.Value +} +var file_gotocompany_entropy_v1beta1_module_proto_depIdxs = []int32{ + 11, // 0: gotocompany.entropy.v1beta1.Module.created_at:type_name -> google.protobuf.Timestamp + 11, // 1: gotocompany.entropy.v1beta1.Module.updated_at:type_name -> google.protobuf.Timestamp + 12, // 2: gotocompany.entropy.v1beta1.Module.configs:type_name -> google.protobuf.Value + 0, // 3: gotocompany.entropy.v1beta1.ListModulesResponse.modules:type_name -> gotocompany.entropy.v1beta1.Module + 0, // 4: gotocompany.entropy.v1beta1.GetModuleResponse.module:type_name -> gotocompany.entropy.v1beta1.Module + 0, // 5: gotocompany.entropy.v1beta1.CreateModuleRequest.module:type_name -> gotocompany.entropy.v1beta1.Module + 0, // 6: gotocompany.entropy.v1beta1.CreateModuleResponse.module:type_name -> gotocompany.entropy.v1beta1.Module + 12, // 7: gotocompany.entropy.v1beta1.UpdateModuleRequest.configs:type_name -> google.protobuf.Value + 0, // 8: gotocompany.entropy.v1beta1.UpdateModuleResponse.module:type_name -> gotocompany.entropy.v1beta1.Module + 1, // 9: gotocompany.entropy.v1beta1.ModuleService.ListModules:input_type -> gotocompany.entropy.v1beta1.ListModulesRequest + 3, // 10: gotocompany.entropy.v1beta1.ModuleService.GetModule:input_type -> gotocompany.entropy.v1beta1.GetModuleRequest + 5, // 11: gotocompany.entropy.v1beta1.ModuleService.CreateModule:input_type -> gotocompany.entropy.v1beta1.CreateModuleRequest + 7, // 12: gotocompany.entropy.v1beta1.ModuleService.UpdateModule:input_type -> gotocompany.entropy.v1beta1.UpdateModuleRequest + 9, // 13: gotocompany.entropy.v1beta1.ModuleService.DeleteModule:input_type -> gotocompany.entropy.v1beta1.DeleteModuleRequest + 2, // 14: gotocompany.entropy.v1beta1.ModuleService.ListModules:output_type -> gotocompany.entropy.v1beta1.ListModulesResponse + 4, // 15: gotocompany.entropy.v1beta1.ModuleService.GetModule:output_type -> gotocompany.entropy.v1beta1.GetModuleResponse + 6, // 16: gotocompany.entropy.v1beta1.ModuleService.CreateModule:output_type -> gotocompany.entropy.v1beta1.CreateModuleResponse + 8, // 17: gotocompany.entropy.v1beta1.ModuleService.UpdateModule:output_type -> gotocompany.entropy.v1beta1.UpdateModuleResponse + 10, // 18: gotocompany.entropy.v1beta1.ModuleService.DeleteModule:output_type -> gotocompany.entropy.v1beta1.DeleteModuleResponse + 14, // [14:19] is the sub-list for method output_type + 9, // [9:14] is the sub-list for method 
input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_gotocompany_entropy_v1beta1_module_proto_init() } +func file_gotocompany_entropy_v1beta1_module_proto_init() { + if File_gotocompany_entropy_v1beta1_module_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Module); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListModulesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListModulesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetModuleRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetModuleResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateModuleRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateModuleResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateModuleRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateModuleResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteModuleRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_module_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteModuleResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_gotocompany_entropy_v1beta1_module_proto_rawDesc, + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_gotocompany_entropy_v1beta1_module_proto_goTypes, + DependencyIndexes: file_gotocompany_entropy_v1beta1_module_proto_depIdxs, + MessageInfos: file_gotocompany_entropy_v1beta1_module_proto_msgTypes, + }.Build() + File_gotocompany_entropy_v1beta1_module_proto = out.File + file_gotocompany_entropy_v1beta1_module_proto_rawDesc = nil + file_gotocompany_entropy_v1beta1_module_proto_goTypes = nil + file_gotocompany_entropy_v1beta1_module_proto_depIdxs = nil +} diff --git a/proto/gotocompany/entropy/v1beta1/module.pb.gw.go b/proto/gotocompany/entropy/v1beta1/module.pb.gw.go new file mode 100644 index 00000000..3d569e0e --- /dev/null +++ b/proto/gotocompany/entropy/v1beta1/module.pb.gw.go @@ -0,0 +1,583 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: gotocompany/entropy/v1beta1/module.proto + +/* +Package entropyv1beta1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package entropyv1beta1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +var ( + filter_ModuleService_ListModules_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_ModuleService_ListModules_0(ctx context.Context, marshaler runtime.Marshaler, client ModuleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListModulesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ModuleService_ListModules_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListModules(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ModuleService_ListModules_0(ctx context.Context, marshaler runtime.Marshaler, server ModuleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListModulesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ModuleService_ListModules_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListModules(ctx, &protoReq) + return msg, metadata, err + +} + +func request_ModuleService_GetModule_0(ctx context.Context, marshaler runtime.Marshaler, client ModuleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var protoReq GetModuleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := client.GetModule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ModuleService_GetModule_0(ctx context.Context, marshaler runtime.Marshaler, server ModuleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetModuleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := server.GetModule(ctx, &protoReq) + return msg, metadata, err + +} + +func request_ModuleService_CreateModule_0(ctx context.Context, marshaler runtime.Marshaler, client ModuleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CreateModuleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Module); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.CreateModule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ModuleService_CreateModule_0(ctx context.Context, marshaler runtime.Marshaler, server ModuleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CreateModuleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Module); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.CreateModule(ctx, &protoReq) + return msg, metadata, err + +} + +func request_ModuleService_UpdateModule_0(ctx context.Context, marshaler runtime.Marshaler, client ModuleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateModuleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + 
_ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := client.UpdateModule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ModuleService_UpdateModule_0(ctx context.Context, marshaler runtime.Marshaler, server ModuleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateModuleRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := server.UpdateModule(ctx, &protoReq) + return msg, metadata, err + +} + +func request_ModuleService_DeleteModule_0(ctx context.Context, marshaler runtime.Marshaler, client ModuleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteModuleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := client.DeleteModule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ModuleService_DeleteModule_0(ctx context.Context, marshaler runtime.Marshaler, server ModuleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteModuleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := server.DeleteModule(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterModuleServiceHandlerServer registers the http handlers for service ModuleService to "mux". +// UnaryRPC :call ModuleServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. 
Consider using RegisterModuleServiceHandlerFromEndpoint instead. +func RegisterModuleServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ModuleServiceServer) error { + + mux.Handle("GET", pattern_ModuleService_ListModules_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/ListModules", runtime.WithHTTPPathPattern("/v1beta1/modules")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ModuleService_ListModules_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_ListModules_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_ModuleService_GetModule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/GetModule", runtime.WithHTTPPathPattern("/v1beta1/modules/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ModuleService_GetModule_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_GetModule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_ModuleService_CreateModule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/CreateModule", runtime.WithHTTPPathPattern("/v1beta1/modules")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ModuleService_CreateModule_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_CreateModule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PATCH", pattern_ModuleService_UpdateModule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/UpdateModule", runtime.WithHTTPPathPattern("/v1beta1/modules/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ModuleService_UpdateModule_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_UpdateModule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_ModuleService_DeleteModule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/DeleteModule", runtime.WithHTTPPathPattern("/v1beta1/modules/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ModuleService_DeleteModule_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_DeleteModule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterModuleServiceHandlerFromEndpoint is same as RegisterModuleServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterModuleServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterModuleServiceHandler(ctx, mux, conn) +} + +// RegisterModuleServiceHandler registers the http handlers for service ModuleService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterModuleServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterModuleServiceHandlerClient(ctx, mux, NewModuleServiceClient(conn)) +} + +// RegisterModuleServiceHandlerClient registers the http handlers for service ModuleService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ModuleServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ModuleServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ModuleServiceClient" to call the correct interceptors. 
+func RegisterModuleServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ModuleServiceClient) error { + + mux.Handle("GET", pattern_ModuleService_ListModules_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/ListModules", runtime.WithHTTPPathPattern("/v1beta1/modules")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ModuleService_ListModules_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_ListModules_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_ModuleService_GetModule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/GetModule", runtime.WithHTTPPathPattern("/v1beta1/modules/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ModuleService_GetModule_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_GetModule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_ModuleService_CreateModule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/CreateModule", runtime.WithHTTPPathPattern("/v1beta1/modules")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ModuleService_CreateModule_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_CreateModule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PATCH", pattern_ModuleService_UpdateModule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/UpdateModule", runtime.WithHTTPPathPattern("/v1beta1/modules/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ModuleService_UpdateModule_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_UpdateModule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_ModuleService_DeleteModule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ModuleService/DeleteModule", runtime.WithHTTPPathPattern("/v1beta1/modules/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ModuleService_DeleteModule_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ModuleService_DeleteModule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_ModuleService_ListModules_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1beta1", "modules"}, "")) + + pattern_ModuleService_GetModule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1beta1", "modules", "urn"}, "")) + + pattern_ModuleService_CreateModule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1beta1", "modules"}, "")) + + pattern_ModuleService_UpdateModule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1beta1", "modules", "urn"}, "")) + + pattern_ModuleService_DeleteModule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1beta1", "modules", "urn"}, "")) +) + +var ( + forward_ModuleService_ListModules_0 = runtime.ForwardResponseMessage + + forward_ModuleService_GetModule_0 = runtime.ForwardResponseMessage + + forward_ModuleService_CreateModule_0 = runtime.ForwardResponseMessage + + forward_ModuleService_UpdateModule_0 = runtime.ForwardResponseMessage + + forward_ModuleService_DeleteModule_0 = runtime.ForwardResponseMessage +) diff --git a/proto/gotocompany/entropy/v1beta1/module.pb.validate.go b/proto/gotocompany/entropy/v1beta1/module.pb.validate.go new file mode 100644 index 00000000..cce53c4b --- /dev/null +++ b/proto/gotocompany/entropy/v1beta1/module.pb.validate.go @@ -0,0 +1,1432 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: gotocompany/entropy/v1beta1/module.proto + +package entropyv1beta1 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Module with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Module) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Module with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in ModuleMultiError, or nil if none found. 
+func (m *Module) ValidateAll() error { + return m.validate(true) +} + +func (m *Module) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + // no validation rules for Name + + // no validation rules for Project + + if all { + switch v := interface{}(m.GetCreatedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ModuleValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ModuleValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ModuleValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpdatedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ModuleValidationError{ + field: "UpdatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ModuleValidationError{ + field: "UpdatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ModuleValidationError{ + field: "UpdatedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetConfigs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ModuleValidationError{ + field: "Configs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ModuleValidationError{ + field: "Configs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ModuleValidationError{ + field: "Configs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ModuleMultiError(errors) + } + + return nil +} + +// ModuleMultiError is an error wrapping multiple validation errors returned by +// Module.ValidateAll() if the designated constraints aren't met. +type ModuleMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ModuleMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ModuleMultiError) AllErrors() []error { return m } + +// ModuleValidationError is the validation error returned by Module.Validate if +// the designated constraints aren't met. +type ModuleValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e ModuleValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ModuleValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ModuleValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ModuleValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ModuleValidationError) ErrorName() string { return "ModuleValidationError" } + +// Error satisfies the builtin error interface +func (e ModuleValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sModule.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ModuleValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ModuleValidationError{} + +// Validate checks the field values on ListModulesRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListModulesRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListModulesRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListModulesRequestMultiError, or nil if none found. +func (m *ListModulesRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ListModulesRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Project + + if len(errors) > 0 { + return ListModulesRequestMultiError(errors) + } + + return nil +} + +// ListModulesRequestMultiError is an error wrapping multiple validation errors +// returned by ListModulesRequest.ValidateAll() if the designated constraints +// aren't met. +type ListModulesRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListModulesRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListModulesRequestMultiError) AllErrors() []error { return m } + +// ListModulesRequestValidationError is the validation error returned by +// ListModulesRequest.Validate if the designated constraints aren't met. +type ListModulesRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListModulesRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListModulesRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListModulesRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListModulesRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListModulesRequestValidationError) ErrorName() string { + return "ListModulesRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ListModulesRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListModulesRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListModulesRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListModulesRequestValidationError{} + +// Validate checks the field values on ListModulesResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListModulesResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListModulesResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListModulesResponseMultiError, or nil if none found. +func (m *ListModulesResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ListModulesResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetModules() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListModulesResponseValidationError{ + field: fmt.Sprintf("Modules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListModulesResponseValidationError{ + field: fmt.Sprintf("Modules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListModulesResponseValidationError{ + field: fmt.Sprintf("Modules[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ListModulesResponseMultiError(errors) + } + + return nil +} + +// ListModulesResponseMultiError is an error wrapping multiple validation +// errors returned by ListModulesResponse.ValidateAll() if the designated +// constraints aren't met. +type ListModulesResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListModulesResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListModulesResponseMultiError) AllErrors() []error { return m } + +// ListModulesResponseValidationError is the validation error returned by +// ListModulesResponse.Validate if the designated constraints aren't met. +type ListModulesResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListModulesResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e ListModulesResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListModulesResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListModulesResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListModulesResponseValidationError) ErrorName() string { + return "ListModulesResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ListModulesResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListModulesResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListModulesResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListModulesResponseValidationError{} + +// Validate checks the field values on GetModuleRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GetModuleRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetModuleRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetModuleRequestMultiError, or nil if none found. +func (m *GetModuleRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetModuleRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + if len(errors) > 0 { + return GetModuleRequestMultiError(errors) + } + + return nil +} + +// GetModuleRequestMultiError is an error wrapping multiple validation errors +// returned by GetModuleRequest.ValidateAll() if the designated constraints +// aren't met. +type GetModuleRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetModuleRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetModuleRequestMultiError) AllErrors() []error { return m } + +// GetModuleRequestValidationError is the validation error returned by +// GetModuleRequest.Validate if the designated constraints aren't met. +type GetModuleRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetModuleRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetModuleRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetModuleRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetModuleRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetModuleRequestValidationError) ErrorName() string { return "GetModuleRequestValidationError" } + +// Error satisfies the builtin error interface +func (e GetModuleRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetModuleRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetModuleRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetModuleRequestValidationError{} + +// Validate checks the field values on GetModuleResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GetModuleResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetModuleResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetModuleResponseMultiError, or nil if none found. +func (m *GetModuleResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetModuleResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetModule()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetModuleResponseValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetModuleResponseValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetModule()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetModuleResponseValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetModuleResponseMultiError(errors) + } + + return nil +} + +// GetModuleResponseMultiError is an error wrapping multiple validation errors +// returned by GetModuleResponse.ValidateAll() if the designated constraints +// aren't met. +type GetModuleResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetModuleResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetModuleResponseMultiError) AllErrors() []error { return m } + +// GetModuleResponseValidationError is the validation error returned by +// GetModuleResponse.Validate if the designated constraints aren't met. +type GetModuleResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetModuleResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetModuleResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e GetModuleResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetModuleResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetModuleResponseValidationError) ErrorName() string { + return "GetModuleResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e GetModuleResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetModuleResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetModuleResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetModuleResponseValidationError{} + +// Validate checks the field values on CreateModuleRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CreateModuleRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CreateModuleRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CreateModuleRequestMultiError, or nil if none found. +func (m *CreateModuleRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateModuleRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetModule()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateModuleRequestValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateModuleRequestValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetModule()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateModuleRequestValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CreateModuleRequestMultiError(errors) + } + + return nil +} + +// CreateModuleRequestMultiError is an error wrapping multiple validation +// errors returned by CreateModuleRequest.ValidateAll() if the designated +// constraints aren't met. +type CreateModuleRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateModuleRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CreateModuleRequestMultiError) AllErrors() []error { return m } + +// CreateModuleRequestValidationError is the validation error returned by +// CreateModuleRequest.Validate if the designated constraints aren't met. +type CreateModuleRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e CreateModuleRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CreateModuleRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateModuleRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateModuleRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CreateModuleRequestValidationError) ErrorName() string { + return "CreateModuleRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateModuleRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateModuleRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateModuleRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateModuleRequestValidationError{} + +// Validate checks the field values on CreateModuleResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CreateModuleResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CreateModuleResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CreateModuleResponseMultiError, or nil if none found. +func (m *CreateModuleResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateModuleResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetModule()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateModuleResponseValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateModuleResponseValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetModule()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateModuleResponseValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CreateModuleResponseMultiError(errors) + } + + return nil +} + +// CreateModuleResponseMultiError is an error wrapping multiple validation +// errors returned by CreateModuleResponse.ValidateAll() if the designated +// constraints aren't met. +type CreateModuleResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateModuleResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m CreateModuleResponseMultiError) AllErrors() []error { return m } + +// CreateModuleResponseValidationError is the validation error returned by +// CreateModuleResponse.Validate if the designated constraints aren't met. +type CreateModuleResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CreateModuleResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CreateModuleResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateModuleResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateModuleResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CreateModuleResponseValidationError) ErrorName() string { + return "CreateModuleResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateModuleResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateModuleResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateModuleResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateModuleResponseValidationError{} + +// Validate checks the field values on UpdateModuleRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpdateModuleRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpdateModuleRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpdateModuleRequestMultiError, or nil if none found. +func (m *UpdateModuleRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *UpdateModuleRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + if all { + switch v := interface{}(m.GetConfigs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateModuleRequestValidationError{ + field: "Configs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateModuleRequestValidationError{ + field: "Configs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateModuleRequestValidationError{ + field: "Configs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UpdateModuleRequestMultiError(errors) + } + + return nil +} + +// UpdateModuleRequestMultiError is an error wrapping multiple validation +// errors returned by UpdateModuleRequest.ValidateAll() if the designated +// constraints aren't met. +type UpdateModuleRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m UpdateModuleRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpdateModuleRequestMultiError) AllErrors() []error { return m } + +// UpdateModuleRequestValidationError is the validation error returned by +// UpdateModuleRequest.Validate if the designated constraints aren't met. +type UpdateModuleRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpdateModuleRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpdateModuleRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpdateModuleRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpdateModuleRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpdateModuleRequestValidationError) ErrorName() string { + return "UpdateModuleRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e UpdateModuleRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpdateModuleRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpdateModuleRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpdateModuleRequestValidationError{} + +// Validate checks the field values on UpdateModuleResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpdateModuleResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpdateModuleResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpdateModuleResponseMultiError, or nil if none found. 
+func (m *UpdateModuleResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *UpdateModuleResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetModule()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateModuleResponseValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateModuleResponseValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetModule()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateModuleResponseValidationError{ + field: "Module", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UpdateModuleResponseMultiError(errors) + } + + return nil +} + +// UpdateModuleResponseMultiError is an error wrapping multiple validation +// errors returned by UpdateModuleResponse.ValidateAll() if the designated +// constraints aren't met. +type UpdateModuleResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpdateModuleResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpdateModuleResponseMultiError) AllErrors() []error { return m } + +// UpdateModuleResponseValidationError is the validation error returned by +// UpdateModuleResponse.Validate if the designated constraints aren't met. +type UpdateModuleResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpdateModuleResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpdateModuleResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpdateModuleResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpdateModuleResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpdateModuleResponseValidationError) ErrorName() string { + return "UpdateModuleResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e UpdateModuleResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpdateModuleResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpdateModuleResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpdateModuleResponseValidationError{} + +// Validate checks the field values on DeleteModuleRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *DeleteModuleRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeleteModuleRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeleteModuleRequestMultiError, or nil if none found. +func (m *DeleteModuleRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *DeleteModuleRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + if len(errors) > 0 { + return DeleteModuleRequestMultiError(errors) + } + + return nil +} + +// DeleteModuleRequestMultiError is an error wrapping multiple validation +// errors returned by DeleteModuleRequest.ValidateAll() if the designated +// constraints aren't met. +type DeleteModuleRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeleteModuleRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeleteModuleRequestMultiError) AllErrors() []error { return m } + +// DeleteModuleRequestValidationError is the validation error returned by +// DeleteModuleRequest.Validate if the designated constraints aren't met. +type DeleteModuleRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeleteModuleRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeleteModuleRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeleteModuleRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeleteModuleRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DeleteModuleRequestValidationError) ErrorName() string { + return "DeleteModuleRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e DeleteModuleRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeleteModuleRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeleteModuleRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeleteModuleRequestValidationError{} + +// Validate checks the field values on DeleteModuleResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DeleteModuleResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeleteModuleResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeleteModuleResponseMultiError, or nil if none found. 
+func (m *DeleteModuleResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *DeleteModuleResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return DeleteModuleResponseMultiError(errors) + } + + return nil +} + +// DeleteModuleResponseMultiError is an error wrapping multiple validation +// errors returned by DeleteModuleResponse.ValidateAll() if the designated +// constraints aren't met. +type DeleteModuleResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeleteModuleResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeleteModuleResponseMultiError) AllErrors() []error { return m } + +// DeleteModuleResponseValidationError is the validation error returned by +// DeleteModuleResponse.Validate if the designated constraints aren't met. +type DeleteModuleResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeleteModuleResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeleteModuleResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeleteModuleResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeleteModuleResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DeleteModuleResponseValidationError) ErrorName() string { + return "DeleteModuleResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e DeleteModuleResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeleteModuleResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeleteModuleResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeleteModuleResponseValidationError{} diff --git a/proto/gotocompany/entropy/v1beta1/module_grpc.pb.go b/proto/gotocompany/entropy/v1beta1/module_grpc.pb.go new file mode 100644 index 00000000..a7015b3c --- /dev/null +++ b/proto/gotocompany/entropy/v1beta1/module_grpc.pb.go @@ -0,0 +1,257 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: gotocompany/entropy/v1beta1/module.proto + +package entropyv1beta1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + ModuleService_ListModules_FullMethodName = "/gotocompany.entropy.v1beta1.ModuleService/ListModules" + ModuleService_GetModule_FullMethodName = "/gotocompany.entropy.v1beta1.ModuleService/GetModule" + ModuleService_CreateModule_FullMethodName = "/gotocompany.entropy.v1beta1.ModuleService/CreateModule" + ModuleService_UpdateModule_FullMethodName = "/gotocompany.entropy.v1beta1.ModuleService/UpdateModule" + ModuleService_DeleteModule_FullMethodName = "/gotocompany.entropy.v1beta1.ModuleService/DeleteModule" +) + +// ModuleServiceClient is the client API for ModuleService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ModuleServiceClient interface { + ListModules(ctx context.Context, in *ListModulesRequest, opts ...grpc.CallOption) (*ListModulesResponse, error) + GetModule(ctx context.Context, in *GetModuleRequest, opts ...grpc.CallOption) (*GetModuleResponse, error) + CreateModule(ctx context.Context, in *CreateModuleRequest, opts ...grpc.CallOption) (*CreateModuleResponse, error) + UpdateModule(ctx context.Context, in *UpdateModuleRequest, opts ...grpc.CallOption) (*UpdateModuleResponse, error) + DeleteModule(ctx context.Context, in *DeleteModuleRequest, opts ...grpc.CallOption) (*DeleteModuleResponse, error) +} + +type moduleServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewModuleServiceClient(cc grpc.ClientConnInterface) ModuleServiceClient { + return &moduleServiceClient{cc} +} + +func (c *moduleServiceClient) ListModules(ctx context.Context, in *ListModulesRequest, opts ...grpc.CallOption) (*ListModulesResponse, error) { + out := new(ListModulesResponse) + err := c.cc.Invoke(ctx, ModuleService_ListModules_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *moduleServiceClient) GetModule(ctx context.Context, in *GetModuleRequest, opts ...grpc.CallOption) (*GetModuleResponse, error) { + out := new(GetModuleResponse) + err := c.cc.Invoke(ctx, ModuleService_GetModule_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *moduleServiceClient) CreateModule(ctx context.Context, in *CreateModuleRequest, opts ...grpc.CallOption) (*CreateModuleResponse, error) { + out := new(CreateModuleResponse) + err := c.cc.Invoke(ctx, ModuleService_CreateModule_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *moduleServiceClient) UpdateModule(ctx context.Context, in *UpdateModuleRequest, opts ...grpc.CallOption) (*UpdateModuleResponse, error) { + out := new(UpdateModuleResponse) + err := c.cc.Invoke(ctx, ModuleService_UpdateModule_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *moduleServiceClient) DeleteModule(ctx context.Context, in *DeleteModuleRequest, opts ...grpc.CallOption) (*DeleteModuleResponse, error) { + out := new(DeleteModuleResponse) + err := c.cc.Invoke(ctx, ModuleService_DeleteModule_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ModuleServiceServer is the server API for ModuleService service. 
+// All implementations must embed UnimplementedModuleServiceServer +// for forward compatibility +type ModuleServiceServer interface { + ListModules(context.Context, *ListModulesRequest) (*ListModulesResponse, error) + GetModule(context.Context, *GetModuleRequest) (*GetModuleResponse, error) + CreateModule(context.Context, *CreateModuleRequest) (*CreateModuleResponse, error) + UpdateModule(context.Context, *UpdateModuleRequest) (*UpdateModuleResponse, error) + DeleteModule(context.Context, *DeleteModuleRequest) (*DeleteModuleResponse, error) + mustEmbedUnimplementedModuleServiceServer() +} + +// UnimplementedModuleServiceServer must be embedded to have forward compatible implementations. +type UnimplementedModuleServiceServer struct { +} + +func (UnimplementedModuleServiceServer) ListModules(context.Context, *ListModulesRequest) (*ListModulesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListModules not implemented") +} +func (UnimplementedModuleServiceServer) GetModule(context.Context, *GetModuleRequest) (*GetModuleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetModule not implemented") +} +func (UnimplementedModuleServiceServer) CreateModule(context.Context, *CreateModuleRequest) (*CreateModuleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateModule not implemented") +} +func (UnimplementedModuleServiceServer) UpdateModule(context.Context, *UpdateModuleRequest) (*UpdateModuleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateModule not implemented") +} +func (UnimplementedModuleServiceServer) DeleteModule(context.Context, *DeleteModuleRequest) (*DeleteModuleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteModule not implemented") +} +func (UnimplementedModuleServiceServer) mustEmbedUnimplementedModuleServiceServer() {} + +// UnsafeModuleServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ModuleServiceServer will +// result in compilation errors. 
+type UnsafeModuleServiceServer interface { + mustEmbedUnimplementedModuleServiceServer() +} + +func RegisterModuleServiceServer(s grpc.ServiceRegistrar, srv ModuleServiceServer) { + s.RegisterService(&ModuleService_ServiceDesc, srv) +} + +func _ModuleService_ListModules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListModulesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModuleServiceServer).ListModules(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ModuleService_ListModules_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModuleServiceServer).ListModules(ctx, req.(*ListModulesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModuleService_GetModule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetModuleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModuleServiceServer).GetModule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ModuleService_GetModule_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModuleServiceServer).GetModule(ctx, req.(*GetModuleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModuleService_CreateModule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateModuleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModuleServiceServer).CreateModule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ModuleService_CreateModule_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModuleServiceServer).CreateModule(ctx, req.(*CreateModuleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModuleService_UpdateModule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateModuleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModuleServiceServer).UpdateModule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ModuleService_UpdateModule_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModuleServiceServer).UpdateModule(ctx, req.(*UpdateModuleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ModuleService_DeleteModule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteModuleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ModuleServiceServer).DeleteModule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ModuleService_DeleteModule_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ModuleServiceServer).DeleteModule(ctx, req.(*DeleteModuleRequest)) + } + return interceptor(ctx, in, 
info, handler) +} + +// ModuleService_ServiceDesc is the grpc.ServiceDesc for ModuleService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ModuleService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "gotocompany.entropy.v1beta1.ModuleService", + HandlerType: (*ModuleServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListModules", + Handler: _ModuleService_ListModules_Handler, + }, + { + MethodName: "GetModule", + Handler: _ModuleService_GetModule_Handler, + }, + { + MethodName: "CreateModule", + Handler: _ModuleService_CreateModule_Handler, + }, + { + MethodName: "UpdateModule", + Handler: _ModuleService_UpdateModule_Handler, + }, + { + MethodName: "DeleteModule", + Handler: _ModuleService_DeleteModule_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "gotocompany/entropy/v1beta1/module.proto", +} diff --git a/proto/gotocompany/entropy/v1beta1/resource.pb.go b/proto/gotocompany/entropy/v1beta1/resource.pb.go new file mode 100644 index 00000000..85c9ddad --- /dev/null +++ b/proto/gotocompany/entropy/v1beta1/resource.pb.go @@ -0,0 +1,2309 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: gotocompany/entropy/v1beta1/resource.proto + +package entropyv1beta1 + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceState_Status int32 + +const ( + ResourceState_STATUS_UNSPECIFIED ResourceState_Status = 0 + ResourceState_STATUS_PENDING ResourceState_Status = 1 + ResourceState_STATUS_ERROR ResourceState_Status = 2 + ResourceState_STATUS_DELETED ResourceState_Status = 3 + ResourceState_STATUS_COMPLETED ResourceState_Status = 4 +) + +// Enum value maps for ResourceState_Status. +var ( + ResourceState_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "STATUS_PENDING", + 2: "STATUS_ERROR", + 3: "STATUS_DELETED", + 4: "STATUS_COMPLETED", + } + ResourceState_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "STATUS_PENDING": 1, + "STATUS_ERROR": 2, + "STATUS_DELETED": 3, + "STATUS_COMPLETED": 4, + } +) + +func (x ResourceState_Status) Enum() *ResourceState_Status { + p := new(ResourceState_Status) + *p = x + return p +} + +func (x ResourceState_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceState_Status) Descriptor() protoreflect.EnumDescriptor { + return file_gotocompany_entropy_v1beta1_resource_proto_enumTypes[0].Descriptor() +} + +func (ResourceState_Status) Type() protoreflect.EnumType { + return &file_gotocompany_entropy_v1beta1_resource_proto_enumTypes[0] +} + +func (x ResourceState_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceState_Status.Descriptor instead. 
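A minimal sketch of how the ModuleService client generated above might be used from application code; the server address, timeout, project name, and import path are illustrative assumptions, not part of this change:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// assumed import path for the generated package
	entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1"
)

func main() {
	// Dial a hypothetical local entropy server without TLS.
	conn, err := grpc.Dial("localhost:8081", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial entropy: %v", err)
	}
	defer conn.Close()

	client := entropyv1beta1.NewModuleServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// List modules for an assumed project and print their names and URNs.
	resp, err := client.ListModules(ctx, &entropyv1beta1.ListModulesRequest{Project: "my-project"})
	if err != nil {
		log.Fatalf("list modules: %v", err)
	}
	for _, mod := range resp.GetModules() {
		log.Printf("module: %s (%s)", mod.GetName(), mod.GetUrn())
	}
}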
+func (ResourceState_Status) EnumDescriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{4, 0} +} + +type ResourceDependency struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key should be as defined by the module being used for + // the resource. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Value should refer to an existing resource via URN. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ResourceDependency) Reset() { + *x = ResourceDependency{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceDependency) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDependency) ProtoMessage() {} + +func (x *ResourceDependency) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDependency.ProtoReflect.Descriptor instead. +func (*ResourceDependency) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDependency) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *ResourceDependency) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type ResourceSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Configs *structpb.Value `protobuf:"bytes,1,opt,name=configs,proto3" json:"configs,omitempty"` + // dependencies can be used to refer to other existing resources + // as dependency of this resource. + Dependencies []*ResourceDependency `protobuf:"bytes,2,rep,name=dependencies,proto3" json:"dependencies,omitempty"` +} + +func (x *ResourceSpec) Reset() { + *x = ResourceSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSpec) ProtoMessage() {} + +func (x *ResourceSpec) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceSpec.ProtoReflect.Descriptor instead. 
+func (*ResourceSpec) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceSpec) GetConfigs() *structpb.Value { + if x != nil { + return x.Configs + } + return nil +} + +func (x *ResourceSpec) GetDependencies() []*ResourceDependency { + if x != nil { + return x.Dependencies + } + return nil +} + +type ListString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ListString) Reset() { + *x = ListString{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListString) ProtoMessage() {} + +func (x *ListString) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListString.ProtoReflect.Descriptor instead. +func (*ListString) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{2} +} + +func (x *ListString) GetValues() []string { + if x != nil { + return x.Values + } + return nil +} + +type LogOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters map[string]*ListString `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *LogOptions) Reset() { + *x = LogOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogOptions) ProtoMessage() {} + +func (x *LogOptions) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogOptions.ProtoReflect.Descriptor instead. 
+func (*LogOptions) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{3} +} + +func (x *LogOptions) GetFilters() map[string]*ListString { + if x != nil { + return x.Filters + } + return nil +} + +type ResourceState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status ResourceState_Status `protobuf:"varint,1,opt,name=status,proto3,enum=gotocompany.entropy.v1beta1.ResourceState_Status" json:"status,omitempty"` + Output *structpb.Value `protobuf:"bytes,2,opt,name=output,proto3" json:"output,omitempty"` + ModuleData []byte `protobuf:"bytes,3,opt,name=module_data,json=moduleData,proto3" json:"module_data,omitempty"` + LogOptions *LogOptions `protobuf:"bytes,4,opt,name=log_options,json=logOptions,proto3" json:"log_options,omitempty"` + // information about the ongoing sync process. + // if status is ERROR / PENDING, this value can be used to understand + // the issue. + SyncRetries int32 `protobuf:"varint,5,opt,name=sync_retries,json=syncRetries,proto3" json:"sync_retries,omitempty"` + SyncLastError string `protobuf:"bytes,6,opt,name=sync_last_error,json=syncLastError,proto3" json:"sync_last_error,omitempty"` + NextSyncAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=next_sync_at,json=nextSyncAt,proto3" json:"next_sync_at,omitempty"` +} + +func (x *ResourceState) Reset() { + *x = ResourceState{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceState) ProtoMessage() {} + +func (x *ResourceState) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceState.ProtoReflect.Descriptor instead. 
+func (*ResourceState) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{4} +} + +func (x *ResourceState) GetStatus() ResourceState_Status { + if x != nil { + return x.Status + } + return ResourceState_STATUS_UNSPECIFIED +} + +func (x *ResourceState) GetOutput() *structpb.Value { + if x != nil { + return x.Output + } + return nil +} + +func (x *ResourceState) GetModuleData() []byte { + if x != nil { + return x.ModuleData + } + return nil +} + +func (x *ResourceState) GetLogOptions() *LogOptions { + if x != nil { + return x.LogOptions + } + return nil +} + +func (x *ResourceState) GetSyncRetries() int32 { + if x != nil { + return x.SyncRetries + } + return 0 +} + +func (x *ResourceState) GetSyncLastError() string { + if x != nil { + return x.SyncLastError + } + return "" +} + +func (x *ResourceState) GetNextSyncAt() *timestamppb.Timestamp { + if x != nil { + return x.NextSyncAt + } + return nil +} + +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Project string `protobuf:"bytes,4,opt,name=project,proto3" json:"project,omitempty"` + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + Spec *ResourceSpec `protobuf:"bytes,8,opt,name=spec,proto3" json:"spec,omitempty"` + State *ResourceState `protobuf:"bytes,9,opt,name=state,proto3" json:"state,omitempty"` + CreatedBy string `protobuf:"bytes,10,opt,name=created_by,json=createdBy,proto3" json:"created_by,omitempty"` + UpdatedBy string `protobuf:"bytes,11,opt,name=updated_by,json=updatedBy,proto3" json:"updated_by,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. 
+func (*Resource) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{5} +} + +func (x *Resource) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +func (x *Resource) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *Resource) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Resource) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *Resource) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Resource) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Resource) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Resource) GetSpec() *ResourceSpec { + if x != nil { + return x.Spec + } + return nil +} + +func (x *Resource) GetState() *ResourceState { + if x != nil { + return x.State + } + return nil +} + +func (x *Resource) GetCreatedBy() string { + if x != nil { + return x.CreatedBy + } + return "" +} + +func (x *Resource) GetUpdatedBy() string { + if x != nil { + return x.UpdatedBy + } + return "" +} + +type ListResourcesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + // filter by labels. if specified, only resources with all the + // given labels will be returned. + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // this toggle if set, will return spec configs as well. + // it's default value is false. + WithSpecConfigs bool `protobuf:"varint,4,opt,name=with_spec_configs,json=withSpecConfigs,proto3" json:"with_spec_configs,omitempty"` + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageNum int32 `protobuf:"varint,6,opt,name=page_num,json=pageNum,proto3" json:"page_num,omitempty"` +} + +func (x *ListResourcesRequest) Reset() { + *x = ListResourcesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListResourcesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListResourcesRequest) ProtoMessage() {} + +func (x *ListResourcesRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListResourcesRequest.ProtoReflect.Descriptor instead. 
+func (*ListResourcesRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{6} +} + +func (x *ListResourcesRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListResourcesRequest) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *ListResourcesRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *ListResourcesRequest) GetWithSpecConfigs() bool { + if x != nil { + return x.WithSpecConfigs + } + return false +} + +func (x *ListResourcesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListResourcesRequest) GetPageNum() int32 { + if x != nil { + return x.PageNum + } + return 0 +} + +type ListResourcesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resources []*Resource `protobuf:"bytes,1,rep,name=resources,proto3" json:"resources,omitempty"` + Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *ListResourcesResponse) Reset() { + *x = ListResourcesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListResourcesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListResourcesResponse) ProtoMessage() {} + +func (x *ListResourcesResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListResourcesResponse.ProtoReflect.Descriptor instead. +func (*ListResourcesResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{7} +} + +func (x *ListResourcesResponse) GetResources() []*Resource { + if x != nil { + return x.Resources + } + return nil +} + +func (x *ListResourcesResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +type GetResourceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` +} + +func (x *GetResourceRequest) Reset() { + *x = GetResourceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResourceRequest) ProtoMessage() {} + +func (x *GetResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResourceRequest.ProtoReflect.Descriptor instead. 
+func (*GetResourceRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{8} +} + +func (x *GetResourceRequest) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +type GetResourceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *GetResourceResponse) Reset() { + *x = GetResourceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResourceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResourceResponse) ProtoMessage() {} + +func (x *GetResourceResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResourceResponse.ProtoReflect.Descriptor instead. +func (*GetResourceResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{9} +} + +func (x *GetResourceResponse) GetResource() *Resource { + if x != nil { + return x.Resource + } + return nil +} + +type CreateResourceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + DryRun bool `protobuf:"varint,2,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` +} + +func (x *CreateResourceRequest) Reset() { + *x = CreateResourceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateResourceRequest) ProtoMessage() {} + +func (x *CreateResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateResourceRequest.ProtoReflect.Descriptor instead. 
+func (*CreateResourceRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{10} +} + +func (x *CreateResourceRequest) GetResource() *Resource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *CreateResourceRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +type CreateResourceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *CreateResourceResponse) Reset() { + *x = CreateResourceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateResourceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateResourceResponse) ProtoMessage() {} + +func (x *CreateResourceResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateResourceResponse.ProtoReflect.Descriptor instead. +func (*CreateResourceResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{11} +} + +func (x *CreateResourceResponse) GetResource() *Resource { + if x != nil { + return x.Resource + } + return nil +} + +type UpdateResourceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` + NewSpec *ResourceSpec `protobuf:"bytes,2,opt,name=new_spec,json=newSpec,proto3" json:"new_spec,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` +} + +func (x *UpdateResourceRequest) Reset() { + *x = UpdateResourceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateResourceRequest) ProtoMessage() {} + +func (x *UpdateResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateResourceRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateResourceRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{12} +} + +func (x *UpdateResourceRequest) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +func (x *UpdateResourceRequest) GetNewSpec() *ResourceSpec { + if x != nil { + return x.NewSpec + } + return nil +} + +func (x *UpdateResourceRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *UpdateResourceRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +type UpdateResourceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *UpdateResourceResponse) Reset() { + *x = UpdateResourceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateResourceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateResourceResponse) ProtoMessage() {} + +func (x *UpdateResourceResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateResourceResponse.ProtoReflect.Descriptor instead. +func (*UpdateResourceResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{13} +} + +func (x *UpdateResourceResponse) GetResource() *Resource { + if x != nil { + return x.Resource + } + return nil +} + +type DeleteResourceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` +} + +func (x *DeleteResourceRequest) Reset() { + *x = DeleteResourceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResourceRequest) ProtoMessage() {} + +func (x *DeleteResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResourceRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteResourceRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{14} +} + +func (x *DeleteResourceRequest) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +type DeleteResourceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteResourceResponse) Reset() { + *x = DeleteResourceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResourceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResourceResponse) ProtoMessage() {} + +func (x *DeleteResourceResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResourceResponse.ProtoReflect.Descriptor instead. +func (*DeleteResourceResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{15} +} + +type ApplyActionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` + Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` + Params *structpb.Value `protobuf:"bytes,3,opt,name=params,proto3" json:"params,omitempty"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DryRun bool `protobuf:"varint,5,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` +} + +func (x *ApplyActionRequest) Reset() { + *x = ApplyActionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyActionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyActionRequest) ProtoMessage() {} + +func (x *ApplyActionRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyActionRequest.ProtoReflect.Descriptor instead. 
+func (*ApplyActionRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{16} +} + +func (x *ApplyActionRequest) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +func (x *ApplyActionRequest) GetAction() string { + if x != nil { + return x.Action + } + return "" +} + +func (x *ApplyActionRequest) GetParams() *structpb.Value { + if x != nil { + return x.Params + } + return nil +} + +func (x *ApplyActionRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *ApplyActionRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +type ApplyActionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *ApplyActionResponse) Reset() { + *x = ApplyActionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyActionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyActionResponse) ProtoMessage() {} + +func (x *ApplyActionResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyActionResponse.ProtoReflect.Descriptor instead. +func (*ApplyActionResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{17} +} + +func (x *ApplyActionResponse) GetResource() *Resource { + if x != nil { + return x.Resource + } + return nil +} + +type LogChunk struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *LogChunk) Reset() { + *x = LogChunk{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogChunk) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogChunk) ProtoMessage() {} + +func (x *LogChunk) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogChunk.ProtoReflect.Descriptor instead. 
+func (*LogChunk) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{18} +} + +func (x *LogChunk) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *LogChunk) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +type GetLogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` + Filter map[string]string `protobuf:"bytes,6,rep,name=filter,proto3" json:"filter,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetLogRequest) Reset() { + *x = GetLogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetLogRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLogRequest) ProtoMessage() {} + +func (x *GetLogRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLogRequest.ProtoReflect.Descriptor instead. +func (*GetLogRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{19} +} + +func (x *GetLogRequest) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +func (x *GetLogRequest) GetFilter() map[string]string { + if x != nil { + return x.Filter + } + return nil +} + +type GetLogResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Chunk *LogChunk `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` +} + +func (x *GetLogResponse) Reset() { + *x = GetLogResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetLogResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLogResponse) ProtoMessage() {} + +func (x *GetLogResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLogResponse.ProtoReflect.Descriptor instead. 
+func (*GetLogResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{20} +} + +func (x *GetLogResponse) GetChunk() *LogChunk { + if x != nil { + return x.Chunk + } + return nil +} + +type ResourceRevision struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Urn string `protobuf:"bytes,2,opt,name=urn,proto3" json:"urn,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + Spec *ResourceSpec `protobuf:"bytes,5,opt,name=spec,proto3" json:"spec,omitempty"` + Reason string `protobuf:"bytes,6,opt,name=reason,proto3" json:"reason,omitempty"` + CreatedBy string `protobuf:"bytes,7,opt,name=created_by,json=createdBy,proto3" json:"created_by,omitempty"` +} + +func (x *ResourceRevision) Reset() { + *x = ResourceRevision{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceRevision) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceRevision) ProtoMessage() {} + +func (x *ResourceRevision) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceRevision.ProtoReflect.Descriptor instead. 
+func (*ResourceRevision) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{21} +} + +func (x *ResourceRevision) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ResourceRevision) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +func (x *ResourceRevision) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *ResourceRevision) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *ResourceRevision) GetSpec() *ResourceSpec { + if x != nil { + return x.Spec + } + return nil +} + +func (x *ResourceRevision) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ResourceRevision) GetCreatedBy() string { + if x != nil { + return x.CreatedBy + } + return "" +} + +type GetResourceRevisionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` +} + +func (x *GetResourceRevisionsRequest) Reset() { + *x = GetResourceRevisionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResourceRevisionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResourceRevisionsRequest) ProtoMessage() {} + +func (x *GetResourceRevisionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResourceRevisionsRequest.ProtoReflect.Descriptor instead. +func (*GetResourceRevisionsRequest) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{22} +} + +func (x *GetResourceRevisionsRequest) GetUrn() string { + if x != nil { + return x.Urn + } + return "" +} + +type GetResourceRevisionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Revisions []*ResourceRevision `protobuf:"bytes,1,rep,name=revisions,proto3" json:"revisions,omitempty"` +} + +func (x *GetResourceRevisionsResponse) Reset() { + *x = GetResourceRevisionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResourceRevisionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResourceRevisionsResponse) ProtoMessage() {} + +func (x *GetResourceRevisionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResourceRevisionsResponse.ProtoReflect.Descriptor instead. 
+func (*GetResourceRevisionsResponse) Descriptor() ([]byte, []int) { + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP(), []int{23} +} + +func (x *GetResourceRevisionsResponse) GetRevisions() []*ResourceRevision { + if x != nil { + return x.Revisions + } + return nil +} + +var File_gotocompany_entropy_v1beta1_resource_proto protoreflect.FileDescriptor + +var file_gotocompany_entropy_v1beta1_resource_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2f, 0x65, 0x6e, + 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x67, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, + 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3c, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x95, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x70, 0x65, 0x63, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x53, 0x0a, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x0c, + 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x22, 0x24, 0x0a, 0x0a, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, + 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x66, 
0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x1a, 0x63, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, + 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x03, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x63, + 0x4c, 0x61, 0x73, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3c, 0x0a, 0x0c, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6e, 0x65, 0x78, + 0x74, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x74, 0x22, 0x70, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, + 0x0c, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, + 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 
0x45, + 0x44, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, + 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x04, 0x22, 0x99, 0x04, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x73, + 0x70, 0x65, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x70, 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x40, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xba, 0x02, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x6f, 0x75, 
0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x55, 0x0a, 0x06, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, + 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, + 0x77, 0x69, 0x74, 0x68, 0x53, 0x70, 0x65, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x19, 0x0a, 0x08, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, + 0x70, 0x61, 0x67, 0x65, 0x4e, 0x75, 0x6d, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x72, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, + 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x22, 0x58, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x73, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x41, 0x0a, 0x08, 0x72, 0x65, 
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, + 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0x5b, 0x0a, + 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x9b, 0x02, 0x0a, 0x15, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x44, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x70, 0x65, 0x63, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x53, 0x70, 0x65, 0x63, 0x12, 0x56, 0x0a, 0x06, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, + 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x1a, 0x39, 0x0a, + 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5b, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, + 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x29, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, + 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x12, 0x41, + 0x70, 0x70, 0x6c, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x75, 0x72, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x53, 0x0a, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, + 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xa4, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x49, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x6f, + 0x67, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 
0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xac, 0x01, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x4e, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0b, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4d, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, + 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x05, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x22, 0xf3, 0x02, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x51, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x39, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, + 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 
0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2f, 0x0a, 0x1b, 0x47, 0x65, 0x74, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x22, 0x6b, 0x0a, 0x1c, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x91, 0x0a, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x92, 0x01, 0x0a, 0x0d, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x31, 0x2e, + 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x92, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, + 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, + 0x7b, 0x75, 0x72, 0x6e, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, + 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x22, 0x12, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x9e, 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, + 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, + 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x3a, 0x01, 0x2a, 0x32, 0x18, + 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x2f, 0x7b, 0x75, 0x72, 0x6e, 0x7d, 0x12, 0x9b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x2e, 0x67, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, + 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x33, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x2a, 0x18, 0x2f, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x75, 0x72, 0x6e, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x33, 0x3a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x29, 0x2f, 0x76, 0x31, 0x62, 0x65, + 0x74, 0x61, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x73, 0x2f, 0x7b, 0x75, + 0x72, 0x6e, 0x7d, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x8a, 0x01, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x12, + 0x2a, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, + 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, + 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, + 0x12, 0x1d, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x75, 0x72, 0x6e, 0x7d, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x30, + 0x01, 0x12, 0xb7, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, + 0x6e, 0x79, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x2a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x12, 0x22, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x75, 0x72, 0x6e, + 0x7d, 0x2f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x77, 0x0a, 0x26, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x74, 0x6f, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x2f, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_gotocompany_entropy_v1beta1_resource_proto_rawDescOnce sync.Once + file_gotocompany_entropy_v1beta1_resource_proto_rawDescData = file_gotocompany_entropy_v1beta1_resource_proto_rawDesc +) + +func file_gotocompany_entropy_v1beta1_resource_proto_rawDescGZIP() []byte { + file_gotocompany_entropy_v1beta1_resource_proto_rawDescOnce.Do(func() { + file_gotocompany_entropy_v1beta1_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_gotocompany_entropy_v1beta1_resource_proto_rawDescData) + }) + return file_gotocompany_entropy_v1beta1_resource_proto_rawDescData +} + +var 
file_gotocompany_entropy_v1beta1_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_gotocompany_entropy_v1beta1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 32) +var file_gotocompany_entropy_v1beta1_resource_proto_goTypes = []interface{}{ + (ResourceState_Status)(0), // 0: gotocompany.entropy.v1beta1.ResourceState.Status + (*ResourceDependency)(nil), // 1: gotocompany.entropy.v1beta1.ResourceDependency + (*ResourceSpec)(nil), // 2: gotocompany.entropy.v1beta1.ResourceSpec + (*ListString)(nil), // 3: gotocompany.entropy.v1beta1.ListString + (*LogOptions)(nil), // 4: gotocompany.entropy.v1beta1.LogOptions + (*ResourceState)(nil), // 5: gotocompany.entropy.v1beta1.ResourceState + (*Resource)(nil), // 6: gotocompany.entropy.v1beta1.Resource + (*ListResourcesRequest)(nil), // 7: gotocompany.entropy.v1beta1.ListResourcesRequest + (*ListResourcesResponse)(nil), // 8: gotocompany.entropy.v1beta1.ListResourcesResponse + (*GetResourceRequest)(nil), // 9: gotocompany.entropy.v1beta1.GetResourceRequest + (*GetResourceResponse)(nil), // 10: gotocompany.entropy.v1beta1.GetResourceResponse + (*CreateResourceRequest)(nil), // 11: gotocompany.entropy.v1beta1.CreateResourceRequest + (*CreateResourceResponse)(nil), // 12: gotocompany.entropy.v1beta1.CreateResourceResponse + (*UpdateResourceRequest)(nil), // 13: gotocompany.entropy.v1beta1.UpdateResourceRequest + (*UpdateResourceResponse)(nil), // 14: gotocompany.entropy.v1beta1.UpdateResourceResponse + (*DeleteResourceRequest)(nil), // 15: gotocompany.entropy.v1beta1.DeleteResourceRequest + (*DeleteResourceResponse)(nil), // 16: gotocompany.entropy.v1beta1.DeleteResourceResponse + (*ApplyActionRequest)(nil), // 17: gotocompany.entropy.v1beta1.ApplyActionRequest + (*ApplyActionResponse)(nil), // 18: gotocompany.entropy.v1beta1.ApplyActionResponse + (*LogChunk)(nil), // 19: gotocompany.entropy.v1beta1.LogChunk + (*GetLogRequest)(nil), // 20: gotocompany.entropy.v1beta1.GetLogRequest + (*GetLogResponse)(nil), // 21: gotocompany.entropy.v1beta1.GetLogResponse + (*ResourceRevision)(nil), // 22: gotocompany.entropy.v1beta1.ResourceRevision + (*GetResourceRevisionsRequest)(nil), // 23: gotocompany.entropy.v1beta1.GetResourceRevisionsRequest + (*GetResourceRevisionsResponse)(nil), // 24: gotocompany.entropy.v1beta1.GetResourceRevisionsResponse + nil, // 25: gotocompany.entropy.v1beta1.LogOptions.FiltersEntry + nil, // 26: gotocompany.entropy.v1beta1.Resource.LabelsEntry + nil, // 27: gotocompany.entropy.v1beta1.ListResourcesRequest.LabelsEntry + nil, // 28: gotocompany.entropy.v1beta1.UpdateResourceRequest.LabelsEntry + nil, // 29: gotocompany.entropy.v1beta1.ApplyActionRequest.LabelsEntry + nil, // 30: gotocompany.entropy.v1beta1.LogChunk.LabelsEntry + nil, // 31: gotocompany.entropy.v1beta1.GetLogRequest.FilterEntry + nil, // 32: gotocompany.entropy.v1beta1.ResourceRevision.LabelsEntry + (*structpb.Value)(nil), // 33: google.protobuf.Value + (*timestamppb.Timestamp)(nil), // 34: google.protobuf.Timestamp +} +var file_gotocompany_entropy_v1beta1_resource_proto_depIdxs = []int32{ + 33, // 0: gotocompany.entropy.v1beta1.ResourceSpec.configs:type_name -> google.protobuf.Value + 1, // 1: gotocompany.entropy.v1beta1.ResourceSpec.dependencies:type_name -> gotocompany.entropy.v1beta1.ResourceDependency + 25, // 2: gotocompany.entropy.v1beta1.LogOptions.filters:type_name -> gotocompany.entropy.v1beta1.LogOptions.FiltersEntry + 0, // 3: gotocompany.entropy.v1beta1.ResourceState.status:type_name -> gotocompany.entropy.v1beta1.ResourceState.Status + 
33, // 4: gotocompany.entropy.v1beta1.ResourceState.output:type_name -> google.protobuf.Value + 4, // 5: gotocompany.entropy.v1beta1.ResourceState.log_options:type_name -> gotocompany.entropy.v1beta1.LogOptions + 34, // 6: gotocompany.entropy.v1beta1.ResourceState.next_sync_at:type_name -> google.protobuf.Timestamp + 26, // 7: gotocompany.entropy.v1beta1.Resource.labels:type_name -> gotocompany.entropy.v1beta1.Resource.LabelsEntry + 34, // 8: gotocompany.entropy.v1beta1.Resource.created_at:type_name -> google.protobuf.Timestamp + 34, // 9: gotocompany.entropy.v1beta1.Resource.updated_at:type_name -> google.protobuf.Timestamp + 2, // 10: gotocompany.entropy.v1beta1.Resource.spec:type_name -> gotocompany.entropy.v1beta1.ResourceSpec + 5, // 11: gotocompany.entropy.v1beta1.Resource.state:type_name -> gotocompany.entropy.v1beta1.ResourceState + 27, // 12: gotocompany.entropy.v1beta1.ListResourcesRequest.labels:type_name -> gotocompany.entropy.v1beta1.ListResourcesRequest.LabelsEntry + 6, // 13: gotocompany.entropy.v1beta1.ListResourcesResponse.resources:type_name -> gotocompany.entropy.v1beta1.Resource + 6, // 14: gotocompany.entropy.v1beta1.GetResourceResponse.resource:type_name -> gotocompany.entropy.v1beta1.Resource + 6, // 15: gotocompany.entropy.v1beta1.CreateResourceRequest.resource:type_name -> gotocompany.entropy.v1beta1.Resource + 6, // 16: gotocompany.entropy.v1beta1.CreateResourceResponse.resource:type_name -> gotocompany.entropy.v1beta1.Resource + 2, // 17: gotocompany.entropy.v1beta1.UpdateResourceRequest.new_spec:type_name -> gotocompany.entropy.v1beta1.ResourceSpec + 28, // 18: gotocompany.entropy.v1beta1.UpdateResourceRequest.labels:type_name -> gotocompany.entropy.v1beta1.UpdateResourceRequest.LabelsEntry + 6, // 19: gotocompany.entropy.v1beta1.UpdateResourceResponse.resource:type_name -> gotocompany.entropy.v1beta1.Resource + 33, // 20: gotocompany.entropy.v1beta1.ApplyActionRequest.params:type_name -> google.protobuf.Value + 29, // 21: gotocompany.entropy.v1beta1.ApplyActionRequest.labels:type_name -> gotocompany.entropy.v1beta1.ApplyActionRequest.LabelsEntry + 6, // 22: gotocompany.entropy.v1beta1.ApplyActionResponse.resource:type_name -> gotocompany.entropy.v1beta1.Resource + 30, // 23: gotocompany.entropy.v1beta1.LogChunk.labels:type_name -> gotocompany.entropy.v1beta1.LogChunk.LabelsEntry + 31, // 24: gotocompany.entropy.v1beta1.GetLogRequest.filter:type_name -> gotocompany.entropy.v1beta1.GetLogRequest.FilterEntry + 19, // 25: gotocompany.entropy.v1beta1.GetLogResponse.chunk:type_name -> gotocompany.entropy.v1beta1.LogChunk + 32, // 26: gotocompany.entropy.v1beta1.ResourceRevision.labels:type_name -> gotocompany.entropy.v1beta1.ResourceRevision.LabelsEntry + 34, // 27: gotocompany.entropy.v1beta1.ResourceRevision.created_at:type_name -> google.protobuf.Timestamp + 2, // 28: gotocompany.entropy.v1beta1.ResourceRevision.spec:type_name -> gotocompany.entropy.v1beta1.ResourceSpec + 22, // 29: gotocompany.entropy.v1beta1.GetResourceRevisionsResponse.revisions:type_name -> gotocompany.entropy.v1beta1.ResourceRevision + 3, // 30: gotocompany.entropy.v1beta1.LogOptions.FiltersEntry.value:type_name -> gotocompany.entropy.v1beta1.ListString + 7, // 31: gotocompany.entropy.v1beta1.ResourceService.ListResources:input_type -> gotocompany.entropy.v1beta1.ListResourcesRequest + 9, // 32: gotocompany.entropy.v1beta1.ResourceService.GetResource:input_type -> gotocompany.entropy.v1beta1.GetResourceRequest + 11, // 33: gotocompany.entropy.v1beta1.ResourceService.CreateResource:input_type 
-> gotocompany.entropy.v1beta1.CreateResourceRequest + 13, // 34: gotocompany.entropy.v1beta1.ResourceService.UpdateResource:input_type -> gotocompany.entropy.v1beta1.UpdateResourceRequest + 15, // 35: gotocompany.entropy.v1beta1.ResourceService.DeleteResource:input_type -> gotocompany.entropy.v1beta1.DeleteResourceRequest + 17, // 36: gotocompany.entropy.v1beta1.ResourceService.ApplyAction:input_type -> gotocompany.entropy.v1beta1.ApplyActionRequest + 20, // 37: gotocompany.entropy.v1beta1.ResourceService.GetLog:input_type -> gotocompany.entropy.v1beta1.GetLogRequest + 23, // 38: gotocompany.entropy.v1beta1.ResourceService.GetResourceRevisions:input_type -> gotocompany.entropy.v1beta1.GetResourceRevisionsRequest + 8, // 39: gotocompany.entropy.v1beta1.ResourceService.ListResources:output_type -> gotocompany.entropy.v1beta1.ListResourcesResponse + 10, // 40: gotocompany.entropy.v1beta1.ResourceService.GetResource:output_type -> gotocompany.entropy.v1beta1.GetResourceResponse + 12, // 41: gotocompany.entropy.v1beta1.ResourceService.CreateResource:output_type -> gotocompany.entropy.v1beta1.CreateResourceResponse + 14, // 42: gotocompany.entropy.v1beta1.ResourceService.UpdateResource:output_type -> gotocompany.entropy.v1beta1.UpdateResourceResponse + 16, // 43: gotocompany.entropy.v1beta1.ResourceService.DeleteResource:output_type -> gotocompany.entropy.v1beta1.DeleteResourceResponse + 18, // 44: gotocompany.entropy.v1beta1.ResourceService.ApplyAction:output_type -> gotocompany.entropy.v1beta1.ApplyActionResponse + 21, // 45: gotocompany.entropy.v1beta1.ResourceService.GetLog:output_type -> gotocompany.entropy.v1beta1.GetLogResponse + 24, // 46: gotocompany.entropy.v1beta1.ResourceService.GetResourceRevisions:output_type -> gotocompany.entropy.v1beta1.GetResourceRevisionsResponse + 39, // [39:47] is the sub-list for method output_type + 31, // [31:39] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_gotocompany_entropy_v1beta1_resource_proto_init() } +func file_gotocompany_entropy_v1beta1_resource_proto_init() { + if File_gotocompany_entropy_v1beta1_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDependency); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[4].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*ResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResourcesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResourcesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResourceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResourceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateResourceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateResourceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateResourceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateResourceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResourceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResourceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyActionRequest); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyActionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogChunk); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetLogResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceRevision); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResourceRevisionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gotocompany_entropy_v1beta1_resource_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResourceRevisionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_gotocompany_entropy_v1beta1_resource_proto_rawDesc, + NumEnums: 1, + NumMessages: 32, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_gotocompany_entropy_v1beta1_resource_proto_goTypes, + DependencyIndexes: file_gotocompany_entropy_v1beta1_resource_proto_depIdxs, + EnumInfos: file_gotocompany_entropy_v1beta1_resource_proto_enumTypes, + MessageInfos: file_gotocompany_entropy_v1beta1_resource_proto_msgTypes, + }.Build() + File_gotocompany_entropy_v1beta1_resource_proto = out.File + file_gotocompany_entropy_v1beta1_resource_proto_rawDesc = nil + file_gotocompany_entropy_v1beta1_resource_proto_goTypes = nil + file_gotocompany_entropy_v1beta1_resource_proto_depIdxs = nil +} diff --git a/proto/gotocompany/entropy/v1beta1/resource.pb.gw.go b/proto/gotocompany/entropy/v1beta1/resource.pb.gw.go new file mode 100644 index 00000000..810745a5 --- /dev/null +++ b/proto/gotocompany/entropy/v1beta1/resource.pb.gw.go @@ -0,0 +1,939 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: gotocompany/entropy/v1beta1/resource.proto + +/* +Package entropyv1beta1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package entropyv1beta1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +var ( + filter_ResourceService_ListResources_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_ResourceService_ListResources_0(ctx context.Context, marshaler runtime.Marshaler, client ResourceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListResourcesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ResourceService_ListResources_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListResources(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ResourceService_ListResources_0(ctx context.Context, marshaler runtime.Marshaler, server ResourceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListResourcesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ResourceService_ListResources_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListResources(ctx, &protoReq) + return msg, metadata, err + +} + +func request_ResourceService_GetResource_0(ctx context.Context, marshaler runtime.Marshaler, client ResourceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetResourceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := client.GetResource(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ResourceService_GetResource_0(ctx context.Context, marshaler runtime.Marshaler, server ResourceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetResourceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + 
protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := server.GetResource(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_ResourceService_CreateResource_0 = &utilities.DoubleArray{Encoding: map[string]int{"resource": 0}, Base: []int{1, 2, 0, 0}, Check: []int{0, 1, 2, 2}} +) + +func request_ResourceService_CreateResource_0(ctx context.Context, marshaler runtime.Marshaler, client ResourceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CreateResourceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Resource); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ResourceService_CreateResource_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.CreateResource(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ResourceService_CreateResource_0(ctx context.Context, marshaler runtime.Marshaler, server ResourceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CreateResourceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Resource); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ResourceService_CreateResource_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.CreateResource(ctx, &protoReq) + return msg, metadata, err + +} + +func request_ResourceService_UpdateResource_0(ctx context.Context, marshaler runtime.Marshaler, client ResourceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateResourceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, 
parameter: %s, error: %v", "urn", err) + } + + msg, err := client.UpdateResource(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ResourceService_UpdateResource_0(ctx context.Context, marshaler runtime.Marshaler, server ResourceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateResourceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := server.UpdateResource(ctx, &protoReq) + return msg, metadata, err + +} + +func request_ResourceService_DeleteResource_0(ctx context.Context, marshaler runtime.Marshaler, client ResourceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteResourceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := client.DeleteResource(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ResourceService_DeleteResource_0(ctx context.Context, marshaler runtime.Marshaler, server ResourceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteResourceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := server.DeleteResource(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_ResourceService_ApplyAction_0 = &utilities.DoubleArray{Encoding: map[string]int{"params": 0, "urn": 1, "action": 2}, Base: []int{1, 2, 4, 6, 0, 0, 0, 0, 0, 0}, Check: []int{0, 1, 1, 1, 2, 2, 3, 3, 4, 4}} +) + +func request_ResourceService_ApplyAction_0(ctx context.Context, marshaler runtime.Marshaler, client ResourceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ApplyActionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Params); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + val, ok = pathParams["action"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "action") + } + + protoReq.Action, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "action", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ResourceService_ApplyAction_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ApplyAction(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ResourceService_ApplyAction_0(ctx context.Context, marshaler runtime.Marshaler, server ResourceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ApplyActionRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Params); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + val, ok = pathParams["action"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "action") + } + + protoReq.Action, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "action", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ResourceService_ApplyAction_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ApplyAction(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_ResourceService_GetLog_0 = &utilities.DoubleArray{Encoding: map[string]int{"urn": 0}, Base: []int{1, 2, 0, 0}, Check: []int{0, 1, 2, 2}} +) + +func request_ResourceService_GetLog_0(ctx context.Context, marshaler runtime.Marshaler, client ResourceServiceClient, req *http.Request, pathParams map[string]string) (ResourceService_GetLogClient, runtime.ServerMetadata, error) { + var 
protoReq GetLogRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ResourceService_GetLog_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.GetLog(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +func request_ResourceService_GetResourceRevisions_0(ctx context.Context, marshaler runtime.Marshaler, client ResourceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetResourceRevisionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := client.GetResourceRevisions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ResourceService_GetResourceRevisions_0(ctx context.Context, marshaler runtime.Marshaler, server ResourceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetResourceRevisionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["urn"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "urn") + } + + protoReq.Urn, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "urn", err) + } + + msg, err := server.GetResourceRevisions(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterResourceServiceHandlerServer registers the http handlers for service ResourceService to "mux". +// UnaryRPC :call ResourceServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterResourceServiceHandlerFromEndpoint instead. 
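A minimal sketch of the in-process registration described in the comment above, assuming the companion _grpc.pb.go file in this package provides UnimplementedResourceServiceServer (as protoc-gen-go-grpc normally generates) and that the go_package import path from the descriptor is usable as-is; the server type and listen address are illustrative only:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"

	entropyv1beta1 "github.com/goto/proton/entropy/v1beta1" // assumed import path, from the go_package option
)

// entropyServer is a placeholder implementation; embedding the generated
// UnimplementedResourceServiceServer (assumed to exist in the companion
// _grpc.pb.go) satisfies ResourceServiceServer without implementing every RPC.
type entropyServer struct {
	entropyv1beta1.UnimplementedResourceServiceServer
}

func main() {
	gwmux := runtime.NewServeMux()

	// Unary RPCs are dispatched straight to the server value; the streaming
	// GetLog route answers with codes.Unimplemented, as noted in the comment.
	if err := entropyv1beta1.RegisterResourceServiceHandlerServer(context.Background(), gwmux, &entropyServer{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", gwmux)) // illustrative address
}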
+func RegisterResourceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ResourceServiceServer) error { + + mux.Handle("GET", pattern_ResourceService_ListResources_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/ListResources", runtime.WithHTTPPathPattern("/v1beta1/resources")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ResourceService_ListResources_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_ListResources_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_ResourceService_GetResource_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/GetResource", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ResourceService_GetResource_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_GetResource_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_ResourceService_CreateResource_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/CreateResource", runtime.WithHTTPPathPattern("/v1beta1/resources")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ResourceService_CreateResource_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_CreateResource_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PATCH", pattern_ResourceService_UpdateResource_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/UpdateResource", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ResourceService_UpdateResource_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_UpdateResource_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_ResourceService_DeleteResource_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/DeleteResource", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ResourceService_DeleteResource_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_DeleteResource_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_ResourceService_ApplyAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/ApplyAction", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}/actions/{action}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ResourceService_ApplyAction_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_ApplyAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_ResourceService_GetLog_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + + mux.Handle("GET", pattern_ResourceService_GetResourceRevisions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/GetResourceRevisions", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}/revisions")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ResourceService_GetResourceRevisions_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_GetResourceRevisions_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterResourceServiceHandlerFromEndpoint is same as RegisterResourceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterResourceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterResourceServiceHandler(ctx, mux, conn) +} + +// RegisterResourceServiceHandler registers the http handlers for service ResourceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterResourceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterResourceServiceHandlerClient(ctx, mux, NewResourceServiceClient(conn)) +} + +// RegisterResourceServiceHandlerClient registers the http handlers for service ResourceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ResourceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ResourceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ResourceServiceClient" to call the correct interceptors. 
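As a counterpart to the in-process sketch above, this is roughly how the proxy form recommended earlier would be wired up, dialing the gRPC endpoint through RegisterResourceServiceHandlerFromEndpoint so that streaming routes such as GetLog also work; both addresses and the insecure transport credentials are illustrative assumptions, not part of the generated code:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	entropyv1beta1 "github.com/goto/proton/entropy/v1beta1" // assumed import path, from the go_package option
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	gwmux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}

	// Dials the gRPC server at "localhost:8080" and closes the connection
	// when ctx is done, as documented above. Addresses are illustrative.
	if err := entropyv1beta1.RegisterResourceServiceHandlerFromEndpoint(ctx, gwmux, "localhost:8080", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8081", gwmux))
}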
+func RegisterResourceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ResourceServiceClient) error { + + mux.Handle("GET", pattern_ResourceService_ListResources_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/ListResources", runtime.WithHTTPPathPattern("/v1beta1/resources")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ResourceService_ListResources_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_ListResources_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_ResourceService_GetResource_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/GetResource", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ResourceService_GetResource_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_GetResource_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_ResourceService_CreateResource_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/CreateResource", runtime.WithHTTPPathPattern("/v1beta1/resources")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ResourceService_CreateResource_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_CreateResource_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PATCH", pattern_ResourceService_UpdateResource_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/UpdateResource", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ResourceService_UpdateResource_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_UpdateResource_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_ResourceService_DeleteResource_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/DeleteResource", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ResourceService_DeleteResource_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_DeleteResource_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_ResourceService_ApplyAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/ApplyAction", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}/actions/{action}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ResourceService_ApplyAction_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_ApplyAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_ResourceService_GetLog_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/GetLog", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}/logs")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ResourceService_GetLog_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_GetLog_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_ResourceService_GetResourceRevisions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/gotocompany.entropy.v1beta1.ResourceService/GetResourceRevisions", runtime.WithHTTPPathPattern("/v1beta1/resources/{urn}/revisions")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ResourceService_GetResourceRevisions_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_ResourceService_GetResourceRevisions_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_ResourceService_ListResources_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1beta1", "resources"}, "")) + + pattern_ResourceService_GetResource_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1beta1", "resources", "urn"}, "")) + + pattern_ResourceService_CreateResource_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1beta1", "resources"}, "")) + + pattern_ResourceService_UpdateResource_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1beta1", "resources", "urn"}, "")) + + pattern_ResourceService_DeleteResource_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1beta1", "resources", "urn"}, "")) + + pattern_ResourceService_ApplyAction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"v1beta1", "resources", "urn", "actions", "action"}, "")) + + pattern_ResourceService_GetLog_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1beta1", "resources", "urn", "logs"}, "")) + + pattern_ResourceService_GetResourceRevisions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1beta1", "resources", "urn", "revisions"}, "")) +) + +var ( + forward_ResourceService_ListResources_0 = runtime.ForwardResponseMessage + + forward_ResourceService_GetResource_0 = runtime.ForwardResponseMessage + + forward_ResourceService_CreateResource_0 = runtime.ForwardResponseMessage + + forward_ResourceService_UpdateResource_0 = runtime.ForwardResponseMessage + + forward_ResourceService_DeleteResource_0 = runtime.ForwardResponseMessage + + forward_ResourceService_ApplyAction_0 = runtime.ForwardResponseMessage + + forward_ResourceService_GetLog_0 = runtime.ForwardResponseStream + + forward_ResourceService_GetResourceRevisions_0 = runtime.ForwardResponseMessage +) diff --git a/proto/gotocompany/entropy/v1beta1/resource.pb.validate.go b/proto/gotocompany/entropy/v1beta1/resource.pb.validate.go new file mode 100644 index 00000000..ee1882d9 --- /dev/null +++ b/proto/gotocompany/entropy/v1beta1/resource.pb.validate.go @@ -0,0 +1,3213 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: gotocompany/entropy/v1beta1/resource.proto + +package entropyv1beta1 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceDependency with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResourceDependency) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceDependency with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceDependencyMultiError, or nil if none found. 
+func (m *ResourceDependency) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceDependency) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + // no validation rules for Value + + if len(errors) > 0 { + return ResourceDependencyMultiError(errors) + } + + return nil +} + +// ResourceDependencyMultiError is an error wrapping multiple validation errors +// returned by ResourceDependency.ValidateAll() if the designated constraints +// aren't met. +type ResourceDependencyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceDependencyMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceDependencyMultiError) AllErrors() []error { return m } + +// ResourceDependencyValidationError is the validation error returned by +// ResourceDependency.Validate if the designated constraints aren't met. +type ResourceDependencyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceDependencyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceDependencyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceDependencyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceDependencyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceDependencyValidationError) ErrorName() string { + return "ResourceDependencyValidationError" +} + +// Error satisfies the builtin error interface +func (e ResourceDependencyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceDependency.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceDependencyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceDependencyValidationError{} + +// Validate checks the field values on ResourceSpec with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResourceSpec) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceSpec with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceSpecMultiError, or +// nil if none found. 
+func (m *ResourceSpec) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceSpec) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetConfigs()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceSpecValidationError{ + field: "Configs", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceSpecValidationError{ + field: "Configs", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetConfigs()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceSpecValidationError{ + field: "Configs", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetDependencies() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceSpecValidationError{ + field: fmt.Sprintf("Dependencies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceSpecValidationError{ + field: fmt.Sprintf("Dependencies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceSpecValidationError{ + field: fmt.Sprintf("Dependencies[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ResourceSpecMultiError(errors) + } + + return nil +} + +// ResourceSpecMultiError is an error wrapping multiple validation errors +// returned by ResourceSpec.ValidateAll() if the designated constraints aren't met. +type ResourceSpecMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceSpecMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceSpecMultiError) AllErrors() []error { return m } + +// ResourceSpecValidationError is the validation error returned by +// ResourceSpec.Validate if the designated constraints aren't met. +type ResourceSpecValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceSpecValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceSpecValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceSpecValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceSpecValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResourceSpecValidationError) ErrorName() string { return "ResourceSpecValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceSpecValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceSpec.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceSpecValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceSpecValidationError{} + +// Validate checks the field values on ListString with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ListString) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListString with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ListStringMultiError, or +// nil if none found. +func (m *ListString) ValidateAll() error { + return m.validate(true) +} + +func (m *ListString) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ListStringMultiError(errors) + } + + return nil +} + +// ListStringMultiError is an error wrapping multiple validation errors +// returned by ListString.ValidateAll() if the designated constraints aren't met. +type ListStringMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListStringMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListStringMultiError) AllErrors() []error { return m } + +// ListStringValidationError is the validation error returned by +// ListString.Validate if the designated constraints aren't met. +type ListStringValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListStringValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListStringValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListStringValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListStringValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListStringValidationError) ErrorName() string { return "ListStringValidationError" } + +// Error satisfies the builtin error interface +func (e ListStringValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListString.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListStringValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListStringValidationError{} + +// Validate checks the field values on LogOptions with the rules defined in the +// proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LogOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LogOptions with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LogOptionsMultiError, or +// nil if none found. +func (m *LogOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *LogOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + { + sorted_keys := make([]string, len(m.GetFilters())) + i := 0 + for key := range m.GetFilters() { + sorted_keys[i] = key + i++ + } + sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] }) + for _, key := range sorted_keys { + val := m.GetFilters()[key] + _ = val + + // no validation rules for Filters[key] + + if all { + switch v := interface{}(val).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, LogOptionsValidationError{ + field: fmt.Sprintf("Filters[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, LogOptionsValidationError{ + field: fmt.Sprintf("Filters[%v]", key), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(val).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return LogOptionsValidationError{ + field: fmt.Sprintf("Filters[%v]", key), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + } + + if len(errors) > 0 { + return LogOptionsMultiError(errors) + } + + return nil +} + +// LogOptionsMultiError is an error wrapping multiple validation errors +// returned by LogOptions.ValidateAll() if the designated constraints aren't met. +type LogOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LogOptionsMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LogOptionsMultiError) AllErrors() []error { return m } + +// LogOptionsValidationError is the validation error returned by +// LogOptions.Validate if the designated constraints aren't met. +type LogOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e LogOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LogOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LogOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LogOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e LogOptionsValidationError) ErrorName() string { return "LogOptionsValidationError" } + +// Error satisfies the builtin error interface +func (e LogOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLogOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LogOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LogOptionsValidationError{} + +// Validate checks the field values on ResourceState with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResourceState) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceState with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceStateMultiError, or +// nil if none found. +func (m *ResourceState) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceState) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Status + + if all { + switch v := interface{}(m.GetOutput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceStateValidationError{ + field: "Output", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceStateValidationError{ + field: "Output", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceStateValidationError{ + field: "Output", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ModuleData + + if all { + switch v := interface{}(m.GetLogOptions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceStateValidationError{ + field: "LogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceStateValidationError{ + field: "LogOptions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLogOptions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceStateValidationError{ + field: "LogOptions", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SyncRetries + + // no validation rules for SyncLastError + + if all { + switch v := interface{}(m.GetNextSyncAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceStateValidationError{ + field: "NextSyncAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceStateValidationError{ + 
field: "NextSyncAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNextSyncAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceStateValidationError{ + field: "NextSyncAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceStateMultiError(errors) + } + + return nil +} + +// ResourceStateMultiError is an error wrapping multiple validation errors +// returned by ResourceState.ValidateAll() if the designated constraints +// aren't met. +type ResourceStateMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceStateMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceStateMultiError) AllErrors() []error { return m } + +// ResourceStateValidationError is the validation error returned by +// ResourceState.Validate if the designated constraints aren't met. +type ResourceStateValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceStateValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceStateValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceStateValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceStateValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceStateValidationError) ErrorName() string { return "ResourceStateValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceStateValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceState.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceStateValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceStateValidationError{} + +// Validate checks the field values on Resource with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Resource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Resource with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceMultiError, or nil +// if none found. 
+func (m *Resource) ValidateAll() error { + return m.validate(true) +} + +func (m *Resource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + // no validation rules for Kind + + // no validation rules for Name + + // no validation rules for Project + + // no validation rules for Labels + + if all { + switch v := interface{}(m.GetCreatedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetUpdatedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "UpdatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "UpdatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "UpdatedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSpec()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Spec", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "Spec", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSpec()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "Spec", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetState()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "State", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "State", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetState()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "State", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for CreatedBy + + // no validation rules for UpdatedBy + + if len(errors) > 0 { + return 
ResourceMultiError(errors) + } + + return nil +} + +// ResourceMultiError is an error wrapping multiple validation errors returned +// by Resource.ValidateAll() if the designated constraints aren't met. +type ResourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceMultiError) AllErrors() []error { return m } + +// ResourceValidationError is the validation error returned by +// Resource.Validate if the designated constraints aren't met. +type ResourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceValidationError{} + +// Validate checks the field values on ListResourcesRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListResourcesRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListResourcesRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListResourcesRequestMultiError, or nil if none found. +func (m *ListResourcesRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ListResourcesRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Project + + // no validation rules for Kind + + // no validation rules for Labels + + // no validation rules for WithSpecConfigs + + // no validation rules for PageSize + + // no validation rules for PageNum + + if len(errors) > 0 { + return ListResourcesRequestMultiError(errors) + } + + return nil +} + +// ListResourcesRequestMultiError is an error wrapping multiple validation +// errors returned by ListResourcesRequest.ValidateAll() if the designated +// constraints aren't met. +type ListResourcesRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ListResourcesRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListResourcesRequestMultiError) AllErrors() []error { return m } + +// ListResourcesRequestValidationError is the validation error returned by +// ListResourcesRequest.Validate if the designated constraints aren't met. +type ListResourcesRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListResourcesRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListResourcesRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListResourcesRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListResourcesRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListResourcesRequestValidationError) ErrorName() string { + return "ListResourcesRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ListResourcesRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListResourcesRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListResourcesRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListResourcesRequestValidationError{} + +// Validate checks the field values on ListResourcesResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListResourcesResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListResourcesResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListResourcesResponseMultiError, or nil if none found. 
+func (m *ListResourcesResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ListResourcesResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetResources() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListResourcesResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListResourcesResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListResourcesResponseValidationError{ + field: fmt.Sprintf("Resources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Count + + if len(errors) > 0 { + return ListResourcesResponseMultiError(errors) + } + + return nil +} + +// ListResourcesResponseMultiError is an error wrapping multiple validation +// errors returned by ListResourcesResponse.ValidateAll() if the designated +// constraints aren't met. +type ListResourcesResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListResourcesResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListResourcesResponseMultiError) AllErrors() []error { return m } + +// ListResourcesResponseValidationError is the validation error returned by +// ListResourcesResponse.Validate if the designated constraints aren't met. +type ListResourcesResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListResourcesResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListResourcesResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListResourcesResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListResourcesResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListResourcesResponseValidationError) ErrorName() string { + return "ListResourcesResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ListResourcesResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListResourcesResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListResourcesResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListResourcesResponseValidationError{} + +// Validate checks the field values on GetResourceRequest with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetResourceRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetResourceRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetResourceRequestMultiError, or nil if none found. +func (m *GetResourceRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetResourceRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + if len(errors) > 0 { + return GetResourceRequestMultiError(errors) + } + + return nil +} + +// GetResourceRequestMultiError is an error wrapping multiple validation errors +// returned by GetResourceRequest.ValidateAll() if the designated constraints +// aren't met. +type GetResourceRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetResourceRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetResourceRequestMultiError) AllErrors() []error { return m } + +// GetResourceRequestValidationError is the validation error returned by +// GetResourceRequest.Validate if the designated constraints aren't met. +type GetResourceRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetResourceRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetResourceRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetResourceRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetResourceRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetResourceRequestValidationError) ErrorName() string { + return "GetResourceRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e GetResourceRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetResourceRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetResourceRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetResourceRequestValidationError{} + +// Validate checks the field values on GetResourceResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetResourceResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetResourceResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetResourceResponseMultiError, or nil if none found. 
+func (m *GetResourceResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetResourceResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetResourceResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetResourceResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetResourceResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetResourceResponseMultiError(errors) + } + + return nil +} + +// GetResourceResponseMultiError is an error wrapping multiple validation +// errors returned by GetResourceResponse.ValidateAll() if the designated +// constraints aren't met. +type GetResourceResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetResourceResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetResourceResponseMultiError) AllErrors() []error { return m } + +// GetResourceResponseValidationError is the validation error returned by +// GetResourceResponse.Validate if the designated constraints aren't met. +type GetResourceResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetResourceResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetResourceResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetResourceResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetResourceResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetResourceResponseValidationError) ErrorName() string { + return "GetResourceResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e GetResourceResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetResourceResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetResourceResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetResourceResponseValidationError{} + +// Validate checks the field values on CreateResourceRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *CreateResourceRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CreateResourceRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CreateResourceRequestMultiError, or nil if none found. +func (m *CreateResourceRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateResourceRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateResourceRequestValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateResourceRequestValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateResourceRequestValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DryRun + + if len(errors) > 0 { + return CreateResourceRequestMultiError(errors) + } + + return nil +} + +// CreateResourceRequestMultiError is an error wrapping multiple validation +// errors returned by CreateResourceRequest.ValidateAll() if the designated +// constraints aren't met. +type CreateResourceRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateResourceRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CreateResourceRequestMultiError) AllErrors() []error { return m } + +// CreateResourceRequestValidationError is the validation error returned by +// CreateResourceRequest.Validate if the designated constraints aren't met. +type CreateResourceRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CreateResourceRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CreateResourceRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateResourceRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateResourceRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CreateResourceRequestValidationError) ErrorName() string { + return "CreateResourceRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateResourceRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateResourceRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateResourceRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateResourceRequestValidationError{} + +// Validate checks the field values on CreateResourceResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CreateResourceResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CreateResourceResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CreateResourceResponseMultiError, or nil if none found. +func (m *CreateResourceResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateResourceResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateResourceResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateResourceResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateResourceResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CreateResourceResponseMultiError(errors) + } + + return nil +} + +// CreateResourceResponseMultiError is an error wrapping multiple validation +// errors returned by CreateResourceResponse.ValidateAll() if the designated +// constraints aren't met. +type CreateResourceResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateResourceResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CreateResourceResponseMultiError) AllErrors() []error { return m } + +// CreateResourceResponseValidationError is the validation error returned by +// CreateResourceResponse.Validate if the designated constraints aren't met. +type CreateResourceResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CreateResourceResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e CreateResourceResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateResourceResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateResourceResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CreateResourceResponseValidationError) ErrorName() string { + return "CreateResourceResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateResourceResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateResourceResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateResourceResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateResourceResponseValidationError{} + +// Validate checks the field values on UpdateResourceRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpdateResourceRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpdateResourceRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpdateResourceRequestMultiError, or nil if none found. +func (m *UpdateResourceRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *UpdateResourceRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + if all { + switch v := interface{}(m.GetNewSpec()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateResourceRequestValidationError{ + field: "NewSpec", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateResourceRequestValidationError{ + field: "NewSpec", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetNewSpec()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateResourceRequestValidationError{ + field: "NewSpec", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Labels + + // no validation rules for DryRun + + if len(errors) > 0 { + return UpdateResourceRequestMultiError(errors) + } + + return nil +} + +// UpdateResourceRequestMultiError is an error wrapping multiple validation +// errors returned by UpdateResourceRequest.ValidateAll() if the designated +// constraints aren't met. +type UpdateResourceRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpdateResourceRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m UpdateResourceRequestMultiError) AllErrors() []error { return m } + +// UpdateResourceRequestValidationError is the validation error returned by +// UpdateResourceRequest.Validate if the designated constraints aren't met. +type UpdateResourceRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpdateResourceRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpdateResourceRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpdateResourceRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpdateResourceRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpdateResourceRequestValidationError) ErrorName() string { + return "UpdateResourceRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e UpdateResourceRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpdateResourceRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpdateResourceRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpdateResourceRequestValidationError{} + +// Validate checks the field values on UpdateResourceResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpdateResourceResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpdateResourceResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpdateResourceResponseMultiError, or nil if none found. +func (m *UpdateResourceResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *UpdateResourceResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateResourceResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateResourceResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateResourceResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return UpdateResourceResponseMultiError(errors) + } + + return nil +} + +// UpdateResourceResponseMultiError is an error wrapping multiple validation +// errors returned by UpdateResourceResponse.ValidateAll() if the designated +// constraints aren't met. 
+type UpdateResourceResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpdateResourceResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpdateResourceResponseMultiError) AllErrors() []error { return m } + +// UpdateResourceResponseValidationError is the validation error returned by +// UpdateResourceResponse.Validate if the designated constraints aren't met. +type UpdateResourceResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpdateResourceResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpdateResourceResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpdateResourceResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpdateResourceResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpdateResourceResponseValidationError) ErrorName() string { + return "UpdateResourceResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e UpdateResourceResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpdateResourceResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpdateResourceResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpdateResourceResponseValidationError{} + +// Validate checks the field values on DeleteResourceRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DeleteResourceRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeleteResourceRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeleteResourceRequestMultiError, or nil if none found. +func (m *DeleteResourceRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *DeleteResourceRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + if len(errors) > 0 { + return DeleteResourceRequestMultiError(errors) + } + + return nil +} + +// DeleteResourceRequestMultiError is an error wrapping multiple validation +// errors returned by DeleteResourceRequest.ValidateAll() if the designated +// constraints aren't met. +type DeleteResourceRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeleteResourceRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m DeleteResourceRequestMultiError) AllErrors() []error { return m } + +// DeleteResourceRequestValidationError is the validation error returned by +// DeleteResourceRequest.Validate if the designated constraints aren't met. +type DeleteResourceRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeleteResourceRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeleteResourceRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeleteResourceRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeleteResourceRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DeleteResourceRequestValidationError) ErrorName() string { + return "DeleteResourceRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e DeleteResourceRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeleteResourceRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeleteResourceRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeleteResourceRequestValidationError{} + +// Validate checks the field values on DeleteResourceResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DeleteResourceResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeleteResourceResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeleteResourceResponseMultiError, or nil if none found. +func (m *DeleteResourceResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *DeleteResourceResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return DeleteResourceResponseMultiError(errors) + } + + return nil +} + +// DeleteResourceResponseMultiError is an error wrapping multiple validation +// errors returned by DeleteResourceResponse.ValidateAll() if the designated +// constraints aren't met. +type DeleteResourceResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeleteResourceResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeleteResourceResponseMultiError) AllErrors() []error { return m } + +// DeleteResourceResponseValidationError is the validation error returned by +// DeleteResourceResponse.Validate if the designated constraints aren't met. +type DeleteResourceResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeleteResourceResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e DeleteResourceResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeleteResourceResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeleteResourceResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DeleteResourceResponseValidationError) ErrorName() string { + return "DeleteResourceResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e DeleteResourceResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeleteResourceResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeleteResourceResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeleteResourceResponseValidationError{} + +// Validate checks the field values on ApplyActionRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ApplyActionRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ApplyActionRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ApplyActionRequestMultiError, or nil if none found. +func (m *ApplyActionRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ApplyActionRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + // no validation rules for Action + + if all { + switch v := interface{}(m.GetParams()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApplyActionRequestValidationError{ + field: "Params", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApplyActionRequestValidationError{ + field: "Params", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetParams()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApplyActionRequestValidationError{ + field: "Params", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Labels + + // no validation rules for DryRun + + if len(errors) > 0 { + return ApplyActionRequestMultiError(errors) + } + + return nil +} + +// ApplyActionRequestMultiError is an error wrapping multiple validation errors +// returned by ApplyActionRequest.ValidateAll() if the designated constraints +// aren't met. +type ApplyActionRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ApplyActionRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
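
Every message in this file follows the same contract: Validate returns the first violation it hits, while ValidateAll runs validate(true) and wraps every violation in the message's MultiError type. A minimal caller-side sketch, assuming the generated entropyv1beta1 package from this diff (the URN and action values are placeholders, not real resources):

// Hypothetical usage of the generated validators; not part of the generated file.
package main

import (
	"fmt"

	entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1"
)

func main() {
	req := &entropyv1beta1.ApplyActionRequest{
		Urn:    "orn:entropy:firehose:example", // placeholder URN
		Action: "start",                        // placeholder action name
	}

	// Validate stops at the first violation; ValidateAll gathers all of them.
	if err := req.ValidateAll(); err != nil {
		// validate(true) returns an ApplyActionRequestMultiError directly,
		// so a plain type assertion is enough to unwrap the individual errors.
		if multi, ok := err.(entropyv1beta1.ApplyActionRequestMultiError); ok {
			for _, verr := range multi.AllErrors() {
				fmt.Println("violation:", verr)
			}
		}
	}
}
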
+func (m ApplyActionRequestMultiError) AllErrors() []error { return m } + +// ApplyActionRequestValidationError is the validation error returned by +// ApplyActionRequest.Validate if the designated constraints aren't met. +type ApplyActionRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ApplyActionRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ApplyActionRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ApplyActionRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ApplyActionRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ApplyActionRequestValidationError) ErrorName() string { + return "ApplyActionRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ApplyActionRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sApplyActionRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ApplyActionRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ApplyActionRequestValidationError{} + +// Validate checks the field values on ApplyActionResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ApplyActionResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ApplyActionResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ApplyActionResponseMultiError, or nil if none found. +func (m *ApplyActionResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ApplyActionResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ApplyActionResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ApplyActionResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ApplyActionResponseValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ApplyActionResponseMultiError(errors) + } + + return nil +} + +// ApplyActionResponseMultiError is an error wrapping multiple validation +// errors returned by ApplyActionResponse.ValidateAll() if the designated +// constraints aren't met. +type ApplyActionResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ApplyActionResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ApplyActionResponseMultiError) AllErrors() []error { return m } + +// ApplyActionResponseValidationError is the validation error returned by +// ApplyActionResponse.Validate if the designated constraints aren't met. +type ApplyActionResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ApplyActionResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ApplyActionResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ApplyActionResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ApplyActionResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ApplyActionResponseValidationError) ErrorName() string { + return "ApplyActionResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ApplyActionResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sApplyActionResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ApplyActionResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ApplyActionResponseValidationError{} + +// Validate checks the field values on LogChunk with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *LogChunk) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on LogChunk with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in LogChunkMultiError, or nil +// if none found. +func (m *LogChunk) ValidateAll() error { + return m.validate(true) +} + +func (m *LogChunk) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Data + + // no validation rules for Labels + + if len(errors) > 0 { + return LogChunkMultiError(errors) + } + + return nil +} + +// LogChunkMultiError is an error wrapping multiple validation errors returned +// by LogChunk.ValidateAll() if the designated constraints aren't met. +type LogChunkMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m LogChunkMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m LogChunkMultiError) AllErrors() []error { return m } + +// LogChunkValidationError is the validation error returned by +// LogChunk.Validate if the designated constraints aren't met. +type LogChunkValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e LogChunkValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e LogChunkValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e LogChunkValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e LogChunkValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e LogChunkValidationError) ErrorName() string { return "LogChunkValidationError" } + +// Error satisfies the builtin error interface +func (e LogChunkValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sLogChunk.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = LogChunkValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = LogChunkValidationError{} + +// Validate checks the field values on GetLogRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GetLogRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetLogRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GetLogRequestMultiError, or +// nil if none found. +func (m *GetLogRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetLogRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + // no validation rules for Filter + + if len(errors) > 0 { + return GetLogRequestMultiError(errors) + } + + return nil +} + +// GetLogRequestMultiError is an error wrapping multiple validation errors +// returned by GetLogRequest.ValidateAll() if the designated constraints +// aren't met. +type GetLogRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetLogRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetLogRequestMultiError) AllErrors() []error { return m } + +// GetLogRequestValidationError is the validation error returned by +// GetLogRequest.Validate if the designated constraints aren't met. +type GetLogRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetLogRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetLogRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetLogRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetLogRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetLogRequestValidationError) ErrorName() string { return "GetLogRequestValidationError" } + +// Error satisfies the builtin error interface +func (e GetLogRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetLogRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetLogRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetLogRequestValidationError{} + +// Validate checks the field values on GetLogResponse with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GetLogResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetLogResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GetLogResponseMultiError, +// or nil if none found. +func (m *GetLogResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetLogResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetChunk()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetLogResponseValidationError{ + field: "Chunk", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetLogResponseValidationError{ + field: "Chunk", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetChunk()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetLogResponseValidationError{ + field: "Chunk", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetLogResponseMultiError(errors) + } + + return nil +} + +// GetLogResponseMultiError is an error wrapping multiple validation errors +// returned by GetLogResponse.ValidateAll() if the designated constraints +// aren't met. +type GetLogResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetLogResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetLogResponseMultiError) AllErrors() []error { return m } + +// GetLogResponseValidationError is the validation error returned by +// GetLogResponse.Validate if the designated constraints aren't met. +type GetLogResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetLogResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetLogResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetLogResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e GetLogResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetLogResponseValidationError) ErrorName() string { return "GetLogResponseValidationError" } + +// Error satisfies the builtin error interface +func (e GetLogResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetLogResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetLogResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetLogResponseValidationError{} + +// Validate checks the field values on ResourceRevision with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourceRevision) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceRevision with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceRevisionMultiError, or nil if none found. +func (m *ResourceRevision) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceRevision) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Id + + // no validation rules for Urn + + // no validation rules for Labels + + if all { + switch v := interface{}(m.GetCreatedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceRevisionValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceRevisionValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceRevisionValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetSpec()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceRevisionValidationError{ + field: "Spec", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceRevisionValidationError{ + field: "Spec", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSpec()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceRevisionValidationError{ + field: "Spec", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Reason + + // no validation rules for CreatedBy + + if len(errors) > 0 { + return ResourceRevisionMultiError(errors) + } + + return nil +} + +// ResourceRevisionMultiError is an error wrapping multiple validation errors +// returned by ResourceRevision.ValidateAll() if the designated constraints +// aren't met. 
+type ResourceRevisionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceRevisionMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceRevisionMultiError) AllErrors() []error { return m } + +// ResourceRevisionValidationError is the validation error returned by +// ResourceRevision.Validate if the designated constraints aren't met. +type ResourceRevisionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceRevisionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceRevisionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceRevisionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceRevisionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceRevisionValidationError) ErrorName() string { return "ResourceRevisionValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceRevisionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceRevision.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceRevisionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceRevisionValidationError{} + +// Validate checks the field values on GetResourceRevisionsRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetResourceRevisionsRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetResourceRevisionsRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetResourceRevisionsRequestMultiError, or nil if none found. +func (m *GetResourceRevisionsRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetResourceRevisionsRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Urn + + if len(errors) > 0 { + return GetResourceRevisionsRequestMultiError(errors) + } + + return nil +} + +// GetResourceRevisionsRequestMultiError is an error wrapping multiple +// validation errors returned by GetResourceRevisionsRequest.ValidateAll() if +// the designated constraints aren't met. +type GetResourceRevisionsRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetResourceRevisionsRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m GetResourceRevisionsRequestMultiError) AllErrors() []error { return m } + +// GetResourceRevisionsRequestValidationError is the validation error returned +// by GetResourceRevisionsRequest.Validate if the designated constraints +// aren't met. +type GetResourceRevisionsRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetResourceRevisionsRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetResourceRevisionsRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetResourceRevisionsRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetResourceRevisionsRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetResourceRevisionsRequestValidationError) ErrorName() string { + return "GetResourceRevisionsRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e GetResourceRevisionsRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetResourceRevisionsRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetResourceRevisionsRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetResourceRevisionsRequestValidationError{} + +// Validate checks the field values on GetResourceRevisionsResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetResourceRevisionsResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetResourceRevisionsResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetResourceRevisionsResponseMultiError, or nil if none found. 
+func (m *GetResourceRevisionsResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetResourceRevisionsResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetRevisions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetResourceRevisionsResponseValidationError{ + field: fmt.Sprintf("Revisions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetResourceRevisionsResponseValidationError{ + field: fmt.Sprintf("Revisions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetResourceRevisionsResponseValidationError{ + field: fmt.Sprintf("Revisions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return GetResourceRevisionsResponseMultiError(errors) + } + + return nil +} + +// GetResourceRevisionsResponseMultiError is an error wrapping multiple +// validation errors returned by GetResourceRevisionsResponse.ValidateAll() if +// the designated constraints aren't met. +type GetResourceRevisionsResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetResourceRevisionsResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetResourceRevisionsResponseMultiError) AllErrors() []error { return m } + +// GetResourceRevisionsResponseValidationError is the validation error returned +// by GetResourceRevisionsResponse.Validate if the designated constraints +// aren't met. +type GetResourceRevisionsResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetResourceRevisionsResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetResourceRevisionsResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetResourceRevisionsResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetResourceRevisionsResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetResourceRevisionsResponseValidationError) ErrorName() string { + return "GetResourceRevisionsResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e GetResourceRevisionsResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetResourceRevisionsResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetResourceRevisionsResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetResourceRevisionsResponseValidationError{} diff --git a/proto/gotocompany/entropy/v1beta1/resource_grpc.pb.go b/proto/gotocompany/entropy/v1beta1/resource_grpc.pb.go new file mode 100644 index 00000000..2520fddc --- /dev/null +++ b/proto/gotocompany/entropy/v1beta1/resource_grpc.pb.go @@ -0,0 +1,396 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: gotocompany/entropy/v1beta1/resource.proto + +package entropyv1beta1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ResourceService_ListResources_FullMethodName = "/gotocompany.entropy.v1beta1.ResourceService/ListResources" + ResourceService_GetResource_FullMethodName = "/gotocompany.entropy.v1beta1.ResourceService/GetResource" + ResourceService_CreateResource_FullMethodName = "/gotocompany.entropy.v1beta1.ResourceService/CreateResource" + ResourceService_UpdateResource_FullMethodName = "/gotocompany.entropy.v1beta1.ResourceService/UpdateResource" + ResourceService_DeleteResource_FullMethodName = "/gotocompany.entropy.v1beta1.ResourceService/DeleteResource" + ResourceService_ApplyAction_FullMethodName = "/gotocompany.entropy.v1beta1.ResourceService/ApplyAction" + ResourceService_GetLog_FullMethodName = "/gotocompany.entropy.v1beta1.ResourceService/GetLog" + ResourceService_GetResourceRevisions_FullMethodName = "/gotocompany.entropy.v1beta1.ResourceService/GetResourceRevisions" +) + +// ResourceServiceClient is the client API for ResourceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ResourceServiceClient interface { + ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) + GetResource(ctx context.Context, in *GetResourceRequest, opts ...grpc.CallOption) (*GetResourceResponse, error) + CreateResource(ctx context.Context, in *CreateResourceRequest, opts ...grpc.CallOption) (*CreateResourceResponse, error) + UpdateResource(ctx context.Context, in *UpdateResourceRequest, opts ...grpc.CallOption) (*UpdateResourceResponse, error) + DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*DeleteResourceResponse, error) + ApplyAction(ctx context.Context, in *ApplyActionRequest, opts ...grpc.CallOption) (*ApplyActionResponse, error) + GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (ResourceService_GetLogClient, error) + GetResourceRevisions(ctx context.Context, in *GetResourceRevisionsRequest, opts ...grpc.CallOption) (*GetResourceRevisionsResponse, error) +} + +type resourceServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewResourceServiceClient(cc grpc.ClientConnInterface) ResourceServiceClient { + return &resourceServiceClient{cc} +} + +func (c *resourceServiceClient) ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) { + out := new(ListResourcesResponse) + err := c.cc.Invoke(ctx, ResourceService_ListResources_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceServiceClient) GetResource(ctx context.Context, in *GetResourceRequest, opts ...grpc.CallOption) (*GetResourceResponse, error) { + out := new(GetResourceResponse) + err := c.cc.Invoke(ctx, ResourceService_GetResource_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceServiceClient) CreateResource(ctx context.Context, in *CreateResourceRequest, opts ...grpc.CallOption) (*CreateResourceResponse, error) { + out := new(CreateResourceResponse) + err := c.cc.Invoke(ctx, ResourceService_CreateResource_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceServiceClient) UpdateResource(ctx context.Context, in *UpdateResourceRequest, opts ...grpc.CallOption) (*UpdateResourceResponse, error) { + out := new(UpdateResourceResponse) + err := c.cc.Invoke(ctx, ResourceService_UpdateResource_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceServiceClient) DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*DeleteResourceResponse, error) { + out := new(DeleteResourceResponse) + err := c.cc.Invoke(ctx, ResourceService_DeleteResource_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceServiceClient) ApplyAction(ctx context.Context, in *ApplyActionRequest, opts ...grpc.CallOption) (*ApplyActionResponse, error) { + out := new(ApplyActionResponse) + err := c.cc.Invoke(ctx, ResourceService_ApplyAction_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceServiceClient) GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (ResourceService_GetLogClient, error) { + stream, err := c.cc.NewStream(ctx, &ResourceService_ServiceDesc.Streams[0], ResourceService_GetLog_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &resourceServiceGetLogClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ResourceService_GetLogClient interface { + Recv() (*GetLogResponse, error) + grpc.ClientStream +} + +type resourceServiceGetLogClient struct { + grpc.ClientStream +} + +func (x *resourceServiceGetLogClient) Recv() (*GetLogResponse, error) { + m := new(GetLogResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *resourceServiceClient) GetResourceRevisions(ctx context.Context, in *GetResourceRevisionsRequest, opts ...grpc.CallOption) (*GetResourceRevisionsResponse, error) { + out := new(GetResourceRevisionsResponse) + err := c.cc.Invoke(ctx, ResourceService_GetResourceRevisions_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ResourceServiceServer is the server API for ResourceService service. +// All implementations must embed UnimplementedResourceServiceServer +// for forward compatibility +type ResourceServiceServer interface { + ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) + GetResource(context.Context, *GetResourceRequest) (*GetResourceResponse, error) + CreateResource(context.Context, *CreateResourceRequest) (*CreateResourceResponse, error) + UpdateResource(context.Context, *UpdateResourceRequest) (*UpdateResourceResponse, error) + DeleteResource(context.Context, *DeleteResourceRequest) (*DeleteResourceResponse, error) + ApplyAction(context.Context, *ApplyActionRequest) (*ApplyActionResponse, error) + GetLog(*GetLogRequest, ResourceService_GetLogServer) error + GetResourceRevisions(context.Context, *GetResourceRevisionsRequest) (*GetResourceRevisionsResponse, error) + mustEmbedUnimplementedResourceServiceServer() +} + +// UnimplementedResourceServiceServer must be embedded to have forward compatible implementations. 
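
GetLog is the one server-streaming RPC on this client: callers receive GetLogResponse messages in a loop until io.EOF, while every other method is a plain unary Invoke. A hedged client-side sketch, assuming the generated package and an already running server (the address and URN are placeholders):

// Hypothetical client sketch; target address and URN are illustrative only.
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1"
)

func main() {
	conn, err := grpc.Dial("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := entropyv1beta1.NewResourceServiceClient(conn)
	ctx := context.Background()

	// Unary call: list the revisions recorded for a resource URN.
	revs, err := client.GetResourceRevisions(ctx, &entropyv1beta1.GetResourceRevisionsRequest{
		Urn: "orn:entropy:firehose:example", // placeholder URN
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("revisions:", len(revs.GetRevisions()))

	// Server-streaming call: consume log chunks until the stream ends.
	stream, err := client.GetLog(ctx, &entropyv1beta1.GetLogRequest{
		Urn: "orn:entropy:firehose:example", // placeholder URN
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s", resp.GetChunk().GetData())
	}
}
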
+type UnimplementedResourceServiceServer struct { +} + +func (UnimplementedResourceServiceServer) ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListResources not implemented") +} +func (UnimplementedResourceServiceServer) GetResource(context.Context, *GetResourceRequest) (*GetResourceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetResource not implemented") +} +func (UnimplementedResourceServiceServer) CreateResource(context.Context, *CreateResourceRequest) (*CreateResourceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateResource not implemented") +} +func (UnimplementedResourceServiceServer) UpdateResource(context.Context, *UpdateResourceRequest) (*UpdateResourceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateResource not implemented") +} +func (UnimplementedResourceServiceServer) DeleteResource(context.Context, *DeleteResourceRequest) (*DeleteResourceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteResource not implemented") +} +func (UnimplementedResourceServiceServer) ApplyAction(context.Context, *ApplyActionRequest) (*ApplyActionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyAction not implemented") +} +func (UnimplementedResourceServiceServer) GetLog(*GetLogRequest, ResourceService_GetLogServer) error { + return status.Errorf(codes.Unimplemented, "method GetLog not implemented") +} +func (UnimplementedResourceServiceServer) GetResourceRevisions(context.Context, *GetResourceRevisionsRequest) (*GetResourceRevisionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetResourceRevisions not implemented") +} +func (UnimplementedResourceServiceServer) mustEmbedUnimplementedResourceServiceServer() {} + +// UnsafeResourceServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ResourceServiceServer will +// result in compilation errors. 
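
On the server side, implementations are expected to embed UnimplementedResourceServiceServer so that any RPC they do not override answers with codes.Unimplemented, and to register themselves through RegisterResourceServiceServer defined just below. A minimal sketch under those assumptions, with a placeholder listen address and stubbed responses:

// Hypothetical server sketch; listen address and responses are placeholders.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1"
)

type resourceServer struct {
	// Embedding keeps the server forward compatible: any RPC not overridden
	// here falls back to the generated Unimplemented stub.
	entropyv1beta1.UnimplementedResourceServiceServer
}

func (s *resourceServer) ListResources(ctx context.Context, req *entropyv1beta1.ListResourcesRequest) (*entropyv1beta1.ListResourcesResponse, error) {
	// Real lookup logic would go here; an empty response keeps the sketch compilable.
	return &entropyv1beta1.ListResourcesResponse{}, nil
}

func (s *resourceServer) GetLog(req *entropyv1beta1.GetLogRequest, stream entropyv1beta1.ResourceService_GetLogServer) error {
	// A real implementation would send GetLogResponse messages as log chunks arrive.
	return stream.Send(&entropyv1beta1.GetLogResponse{})
}

func main() {
	lis, err := net.Listen("tcp", ":8080") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	entropyv1beta1.RegisterResourceServiceServer(srv, &resourceServer{})
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
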
+type UnsafeResourceServiceServer interface { + mustEmbedUnimplementedResourceServiceServer() +} + +func RegisterResourceServiceServer(s grpc.ServiceRegistrar, srv ResourceServiceServer) { + s.RegisterService(&ResourceService_ServiceDesc, srv) +} + +func _ResourceService_ListResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceServiceServer).ListResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ResourceService_ListResources_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceServiceServer).ListResources(ctx, req.(*ListResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceService_GetResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceServiceServer).GetResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ResourceService_GetResource_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceServiceServer).GetResource(ctx, req.(*GetResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceService_CreateResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceServiceServer).CreateResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ResourceService_CreateResource_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceServiceServer).CreateResource(ctx, req.(*CreateResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceService_UpdateResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceServiceServer).UpdateResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ResourceService_UpdateResource_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceServiceServer).UpdateResource(ctx, req.(*UpdateResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceService_DeleteResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceServiceServer).DeleteResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ResourceService_DeleteResource_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ResourceServiceServer).DeleteResource(ctx, req.(*DeleteResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceService_ApplyAction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyActionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceServiceServer).ApplyAction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ResourceService_ApplyAction_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceServiceServer).ApplyAction(ctx, req.(*ApplyActionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceService_GetLog_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetLogRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ResourceServiceServer).GetLog(m, &resourceServiceGetLogServer{stream}) +} + +type ResourceService_GetLogServer interface { + Send(*GetLogResponse) error + grpc.ServerStream +} + +type resourceServiceGetLogServer struct { + grpc.ServerStream +} + +func (x *resourceServiceGetLogServer) Send(m *GetLogResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _ResourceService_GetResourceRevisions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetResourceRevisionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceServiceServer).GetResourceRevisions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ResourceService_GetResourceRevisions_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceServiceServer).GetResourceRevisions(ctx, req.(*GetResourceRevisionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ResourceService_ServiceDesc is the grpc.ServiceDesc for ResourceService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ResourceService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "gotocompany.entropy.v1beta1.ResourceService", + HandlerType: (*ResourceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListResources", + Handler: _ResourceService_ListResources_Handler, + }, + { + MethodName: "GetResource", + Handler: _ResourceService_GetResource_Handler, + }, + { + MethodName: "CreateResource", + Handler: _ResourceService_CreateResource_Handler, + }, + { + MethodName: "UpdateResource", + Handler: _ResourceService_UpdateResource_Handler, + }, + { + MethodName: "DeleteResource", + Handler: _ResourceService_DeleteResource_Handler, + }, + { + MethodName: "ApplyAction", + Handler: _ResourceService_ApplyAction_Handler, + }, + { + MethodName: "GetResourceRevisions", + Handler: _ResourceService_GetResourceRevisions_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetLog", + Handler: _ResourceService_GetLog_Handler, + ServerStreams: true, + }, + }, + Metadata: "gotocompany/entropy/v1beta1/resource.proto", +} diff --git a/test/e2e_test/firehose_helper_test.go b/test/e2e_test/firehose_helper_test.go new file mode 100644 index 00000000..086bd849 --- /dev/null +++ b/test/e2e_test/firehose_helper_test.go @@ -0,0 +1,56 @@ +package e2e_test + +import ( + "context" + "encoding/json" + "os" + "time" + + "github.com/goto/entropy/pkg/kube" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" + "github.com/goto/entropy/test/testbench" + v1 "k8s.io/api/core/v1" + "sigs.k8s.io/kind/pkg/cluster" +) + +func getRunningFirehosePods(ctx context.Context, kubeProvider *cluster.Provider, clusterName, namespace string, labels map[string]string, waitTime time.Duration) ([]kube.Pod, error) { + host, clientCertificate, clientKey, err := testbench.GetClusterCredentials(kubeProvider, clusterName) + if err != nil { + return nil, err + } + + kubeClient, err := kube.NewClient(ctx, kube.Config{ + Host: host, + Insecure: true, + ClientCertificate: clientCertificate, + ClientKey: clientKey, + }) + if err != nil { + return nil, err + } + + time.Sleep(waitTime) + pods, err := kubeClient.GetPodDetails(ctx, namespace, labels, func(pod v1.Pod) bool { + return pod.Status.Phase == v1.PodRunning + }) + if err != nil { + return nil, err + } + + return pods, nil +} + +func getFirehoseResourceRequest() (*entropyv1beta1.Resource, error) { + resourceData, err := os.ReadFile(testbench.TestDataPath + "/resource/firehose_resource.json") + if err != nil { + return nil, err + } + + var resourceConfig *entropyv1beta1.Resource + err = json.Unmarshal(resourceData, &resourceConfig) + if err != nil { + return nil, err + } + + return resourceConfig, nil +} diff --git a/test/e2e_test/firehose_test.go b/test/e2e_test/firehose_test.go new file mode 100644 index 00000000..d411a613 --- /dev/null +++ b/test/e2e_test/firehose_test.go @@ -0,0 +1,100 @@ +package e2e_test + +import ( + "context" + "testing" + "time" + + "github.com/goto/entropy/cli" + "github.com/goto/entropy/core/resource" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" + "github.com/goto/entropy/test/testbench" + "github.com/ory/dockertest/v3" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + 
"google.golang.org/protobuf/types/known/structpb" + "sigs.k8s.io/kind/pkg/cluster" +) + +type FirehoseTestSuite struct { + suite.Suite + ctx context.Context + moduleClient entropyv1beta1.ModuleServiceClient + resourceClient entropyv1beta1.ResourceServiceClient + cancelResourceClient func() + cancelModuleClient func() + cancel func() + appConfig *cli.Config + pool *dockertest.Pool + resource *dockertest.Resource + kubeProvider *cluster.Provider +} + +func (s *FirehoseTestSuite) SetupTest() { + s.ctx, s.moduleClient, s.resourceClient, s.appConfig, s.pool, s.resource, s.kubeProvider, s.cancelModuleClient, s.cancelResourceClient, s.cancel = testbench.SetupTests(s.T(), true, true) + + modules, err := s.moduleClient.ListModules(s.ctx, &entropyv1beta1.ListModulesRequest{}) + s.Require().NoError(err) + s.Require().Equal(9, len(modules.GetModules())) + + resources, err := s.resourceClient.ListResources(s.ctx, &entropyv1beta1.ListResourcesRequest{ + Kind: "kubernetes", + }) + s.Require().NoError(err) + s.Require().Equal(3, len(resources.GetResources())) +} + +func (s *FirehoseTestSuite) TestCreateFirehose() { + s.Run("create firehose with invalid request body should return invalid error", func() { + _, err := s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{ + Resource: &entropyv1beta1.Resource{ + Name: "test-firehose", + Project: "test-project-0", + Kind: "firehose", + Spec: &entropyv1beta1.ResourceSpec{ + Configs: structpb.NewStringValue("{}"), + Dependencies: []*entropyv1beta1.ResourceDependency{}, + }, + }, + }) + s.Assert().Equal(codes.InvalidArgument, status.Convert(err).Code()) + }) + + s.Run("create firehose with right request body should return no error and run a new firehose resource", func() { + resourceConfig, err := getFirehoseResourceRequest() + s.Require().NoError(err) + + resp, err := s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{ + Resource: resourceConfig, + }) + s.Require().NoError(err) + + pods, err := getRunningFirehosePods(s.ctx, s.kubeProvider, testbench.TestClusterName, testbench.TestNamespace, map[string]string{}, 90*time.Second) + s.Require().NoError(err) + s.Require().Equal(1, len(pods)) + + createdFirehose, err := s.resourceClient.GetResource(s.ctx, &entropyv1beta1.GetResourceRequest{ + Urn: resp.GetResource().Urn, + }) + s.Require().NoError(err) + s.Require().NotNil(createdFirehose) + s.Require().Equal(resource.StatusCompleted, createdFirehose.Resource.State.Status.String()) + }) +} + +func (s *FirehoseTestSuite) TearDownTest() { + if err := s.pool.Purge(s.resource); err != nil { + s.T().Fatal(err) + } + + if err := s.kubeProvider.Delete(testbench.TestClusterName, ""); err != nil { + s.T().Fatal(err) + } + + s.cancel() +} + +func TestFirehoseTestSuite(t *testing.T) { + suite.Run(t, new(FirehoseTestSuite)) +} diff --git a/test/e2e_test/flink_test.go b/test/e2e_test/flink_test.go new file mode 100644 index 00000000..462859ea --- /dev/null +++ b/test/e2e_test/flink_test.go @@ -0,0 +1,138 @@ +package e2e_test + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/goto/entropy/cli" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" + "github.com/goto/entropy/test/testbench" + "github.com/ory/dockertest/v3" + "github.com/stretchr/testify/suite" + "sigs.k8s.io/kind/pkg/cluster" +) + +type FlinkTestSuite struct { + suite.Suite + ctx context.Context + moduleClient entropyv1beta1.ModuleServiceClient + 
resourceClient entropyv1beta1.ResourceServiceClient + cancelModuleClient func() + cancelResourceClient func() + cancel func() + resource *dockertest.Resource + pool *dockertest.Pool + appConfig *cli.Config + kubeProvider *cluster.Provider +} + +func (s *FlinkTestSuite) SetupTest() { + s.ctx, s.moduleClient, s.resourceClient, s.appConfig, s.pool, s.resource, s.kubeProvider, s.cancelModuleClient, s.cancelResourceClient, s.cancel = testbench.SetupTests(s.T(), true, true) + + modules, err := s.moduleClient.ListModules(s.ctx, &entropyv1beta1.ListModulesRequest{}) + s.Require().NoError(err) + s.Require().Equal(9, len(modules.GetModules())) + + resources, err := s.resourceClient.ListResources(s.ctx, &entropyv1beta1.ListResourcesRequest{ + Kind: "kubernetes", + }) + s.Require().NoError(err) + s.Require().Equal(3, len(resources.GetResources())) +} + +func (s *FlinkTestSuite) TestFlink() { + s.Run("create flink module return success", func() { + moduleData, err := os.ReadFile(testbench.TestDataPath + "module/flink_module.json") + if err != nil { + s.T().Fatal(err) + } + + var moduleConfig *entropyv1beta1.Module + err = json.Unmarshal(moduleData, &moduleConfig) + if err != nil { + s.T().Fatal(err) + } + _, err = s.moduleClient.CreateModule(s.ctx, &entropyv1beta1.CreateModuleRequest{ + Module: moduleConfig, + }) + s.Require().NoError(err) + }) + /* + s.Run("create flink with invalid config will return invalid error", func() { + _, err := s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{ + Resource: &entropyv1beta1.Resource{ + Name: "test-flink", + Project: "test-project", + Kind: "flink", + Spec: &entropyv1beta1.ResourceSpec{ + Configs: structpb.NewStringValue("{}"), + Dependencies: []*entropyv1beta1.ResourceDependency{}, + }, + }, + }) + s.Assert().Equal(codes.InvalidArgument, status.Convert(err).Code()) + }) + */ + s.Run("create flink with right config will return success", func() { + resourceData, err := os.ReadFile(testbench.TestDataPath + "/resource/flink_resource.json") + if err != nil { + s.T().Fatal(err) + } + + var resourceConfig *entropyv1beta1.Resource + err = json.Unmarshal(resourceData, &resourceConfig) + if err != nil { + s.T().Fatal(err) + } + + _, err = s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{ + Resource: resourceConfig, + }) + s.Require().NoError(err) + }) + + resources, err := s.resourceClient.ListResources(s.ctx, &entropyv1beta1.ListResourcesRequest{ + Kind: "flink", + }) + s.Require().NoError(err) + s.Require().Equal(1, len(resources.GetResources())) + + s.Run("update flink with right config will return success", func() { + resourceData, err := os.ReadFile(testbench.TestDataPath + "/resource/flink_resource.json") + if err != nil { + s.T().Fatal(err) + } + + var resourceConfig *entropyv1beta1.Resource + err = json.Unmarshal(resourceData, &resourceConfig) + if err != nil { + s.T().Fatal(err) + } + + resourceConfig.Spec.Dependencies = nil + + _, err = s.resourceClient.UpdateResource(s.ctx, &entropyv1beta1.UpdateResourceRequest{ + Urn: resources.GetResources()[0].Urn, + NewSpec: resourceConfig.Spec, + }) + s.Require().NoError(err) + }) +} + +func (s *FlinkTestSuite) TearDownTest() { + if err := s.pool.Purge(s.resource); err != nil { + s.T().Fatal(err) + } + + if err := s.kubeProvider.Delete(testbench.TestClusterName, ""); err != nil { + s.T().Fatal(err) + } + + s.cancel() +} + +func TestFlinkTestSuite(t *testing.T) { + suite.Run(t, new(FlinkTestSuite)) +} diff --git a/test/e2e_test/kafka_test.go 
b/test/e2e_test/kafka_test.go new file mode 100644 index 00000000..ce28239a --- /dev/null +++ b/test/e2e_test/kafka_test.go @@ -0,0 +1,119 @@ +package e2e_test + +import ( + "context" + "encoding/json" + "os" + "testing" + + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" + "github.com/goto/entropy/test/testbench" + "github.com/ory/dockertest/v3" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/structpb" +) + +type KafkaTestSuite struct { + suite.Suite + ctx context.Context + moduleClient entropyv1beta1.ModuleServiceClient + resourceClient entropyv1beta1.ResourceServiceClient + cancelModuleClient func() + cancelResourceClient func() + cancel func() + resource *dockertest.Resource + pool *dockertest.Pool +} + +func (s *KafkaTestSuite) SetupTest() { + s.ctx, s.moduleClient, s.resourceClient, _, s.pool, s.resource, _, s.cancelModuleClient, s.cancelResourceClient, s.cancel = testbench.SetupTests(s.T(), false, false) +} + +func (s *KafkaTestSuite) TestKafka() { + s.Run("create kafka module return success", func() { + moduleData, err := os.ReadFile(testbench.TestDataPath + "/module/kafka_module.json") + if err != nil { + s.T().Fatal(err) + } + + var moduleConfig *entropyv1beta1.Module + err = json.Unmarshal(moduleData, &moduleConfig) + if err != nil { + s.T().Fatal(err) + } + _, err = s.moduleClient.CreateModule(s.ctx, &entropyv1beta1.CreateModuleRequest{ + Module: moduleConfig, + }) + s.Require().NoError(err) + }) + + s.Run("create kafka with invalid config will return invalid error", func() { + _, err := s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{ + Resource: &entropyv1beta1.Resource{ + Name: "test-kafka", + Project: "test-project", + Kind: "kafka", + Spec: &entropyv1beta1.ResourceSpec{ + Configs: structpb.NewStringValue("{}"), + Dependencies: []*entropyv1beta1.ResourceDependency{}, + }, + }, + }) + s.Assert().Equal(codes.InvalidArgument, status.Convert(err).Code()) + }) + + s.Run("create kafka with right config will return success", func() { + resourceData, err := os.ReadFile(testbench.TestDataPath + "/resource/kafka_resource.json") + if err != nil { + s.T().Fatal(err) + } + + var resourceConfig *entropyv1beta1.Resource + err = json.Unmarshal(resourceData, &resourceConfig) + if err != nil { + s.T().Fatal(err) + } + + _, err = s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{ + Resource: resourceConfig, + }) + s.Require().NoError(err) + }) + + resources, err := s.resourceClient.ListResources(s.ctx, &entropyv1beta1.ListResourcesRequest{ + Kind: "kafka", + }) + s.Require().NoError(err) + s.Require().Equal(1, len(resources.GetResources())) + + s.Run("update kafka with right config will return success", func() { + resourceData, err := os.ReadFile(testbench.TestDataPath + "/resource/kafka_resource.json") + if err != nil { + s.T().Fatal(err) + } + + var resourceConfig *entropyv1beta1.Resource + err = json.Unmarshal(resourceData, &resourceConfig) + if err != nil { + s.T().Fatal(err) + } + + _, err = s.resourceClient.UpdateResource(s.ctx, &entropyv1beta1.UpdateResourceRequest{ + Urn: resources.GetResources()[0].Urn, + NewSpec: resourceConfig.Spec, + }) + s.Require().NoError(err) + }) +} + +func (s *KafkaTestSuite) TearDownTest() { + if err := s.pool.Purge(s.resource); err != nil { + s.T().Fatal(err) + } +} + +func TestKafkaTestSuite(t *testing.T) { + suite.Run(t, 
new(KafkaTestSuite))
+}
diff --git a/test/e2e_test/worker_test.go b/test/e2e_test/worker_test.go
new file mode 100644
index 00000000..96b37bc3
--- /dev/null
+++ b/test/e2e_test/worker_test.go
@@ -0,0 +1,187 @@
+package e2e_test
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/goto/entropy/cli"
+	"github.com/goto/entropy/core/resource"
+	entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1"
+	"github.com/goto/entropy/test/testbench"
+	"github.com/ory/dockertest/v3"
+	"github.com/stretchr/testify/suite"
+	"sigs.k8s.io/kind/pkg/cluster"
+)
+
+type WorkerTestSuite struct {
+	suite.Suite
+	ctx                  context.Context
+	moduleClient         entropyv1beta1.ModuleServiceClient
+	resourceClient       entropyv1beta1.ResourceServiceClient
+	cancelResourceClient func()
+	cancelModuleClient   func()
+	cancel               func()
+	appConfig            *cli.Config
+	pool                 *dockertest.Pool
+	resource             *dockertest.Resource
+	kubeProvider         *cluster.Provider
+	resources            []*entropyv1beta1.Resource
+}
+
+func (s *WorkerTestSuite) SetupTest() {
+	s.ctx, s.moduleClient, s.resourceClient, s.appConfig, s.pool, s.resource, s.kubeProvider, s.cancelModuleClient, s.cancelResourceClient, s.cancel = testbench.SetupTests(s.T(), false, true)
+
+	modules, err := s.moduleClient.ListModules(s.ctx, &entropyv1beta1.ListModulesRequest{})
+	s.Require().NoError(err)
+	s.Require().Equal(9, len(modules.GetModules()))
+
+	resources, err := s.resourceClient.ListResources(s.ctx, &entropyv1beta1.ListResourcesRequest{
+		Kind: "kubernetes",
+	})
+	s.Require().NoError(err)
+	s.Require().Equal(3, len(resources.GetResources()))
+	s.resources = resources.GetResources()
+
+}
+
+func (s *WorkerTestSuite) TestWorkerDefault() {
+	testbench.SetupWorker(s.T(), s.ctx, *s.appConfig)
+
+	s.Run("running worker with default config will run one worker that takes any job", func() {
+		resourceConfig, err := getFirehoseResourceRequest()
+		s.Require().NoError(err)
+
+		resp, err := s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{
+			Resource: resourceConfig,
+		})
+		s.Require().NoError(err)
+
+		pods, err := getRunningFirehosePods(s.ctx, s.kubeProvider, testbench.TestClusterName, testbench.TestNamespace, map[string]string{}, 90*time.Second)
+		s.Require().NoError(err)
+		s.Require().Equal(1, len(pods))
+
+		createdFirehose, err := s.resourceClient.GetResource(s.ctx, &entropyv1beta1.GetResourceRequest{
+			Urn: resp.GetResource().Urn,
+		})
+		s.Require().NoError(err)
+		s.Require().NotNil(createdFirehose)
+		s.Require().Equal(resource.StatusCompleted, createdFirehose.Resource.State.Status.String())
+	})
+
+	s.Run("running worker with default config will also take jobs from a different project and cluster", func() {
+		resourceConfig, err := getFirehoseResourceRequest()
+		s.Require().NoError(err)
+
+		resourceConfig.Project = s.resources[1].Project
+		resourceConfig.Spec.Dependencies = []*entropyv1beta1.ResourceDependency{
+			{
+				Key:   "kube_cluster",
+				Value: s.resources[1].Urn,
+			},
+		}
+
+		resp, err := s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{
+			Resource: resourceConfig,
+		})
+		s.Require().NoError(err)
+
+		pods, err := getRunningFirehosePods(s.ctx, s.kubeProvider, testbench.TestClusterName, testbench.TestNamespace, map[string]string{}, 90*time.Second)
+		s.Require().NoError(err)
+		s.Require().Equal(2, len(pods))
+
+		createdFirehose, err := s.resourceClient.GetResource(s.ctx, &entropyv1beta1.GetResourceRequest{
+			Urn: resp.GetResource().Urn,
+		})
+		s.Require().NoError(err)
+		s.Require().NotNil(createdFirehose)
+		s.Require().Equal(resource.StatusCompleted, createdFirehose.Resource.State.Status.String())
+	})
+}
+
+func (s *WorkerTestSuite) TestWorkerScope() {
+	projectScope := []string{s.resources[0].Project}
+	workerConfig := cli.WorkerConfig{
+		Count: 1,
+		Scope: map[string][]string{
+			"project": projectScope,
+		},
+	}
+
+	s.appConfig.Syncer.Workers = map[string]cli.WorkerConfig{"test-project-0-worker": workerConfig}
+	testbench.SetupWorker(s.T(), s.ctx, *s.appConfig)
+
+	s.Run("running worker with project-scoped config will run worker(s) that take jobs for the configured project", func() {
+		resourceConfig, err := getFirehoseResourceRequest()
+		s.Require().NoError(err)
+
+		resourceConfig.Project = s.resources[0].Project
+		resourceConfig.Spec.Dependencies = []*entropyv1beta1.ResourceDependency{
+			{
+				Key:   "kube_cluster",
+				Value: s.resources[0].Urn,
+			},
+		}
+
+		resp, err := s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{
+			Resource: resourceConfig,
+		})
+		s.Require().NoError(err)
+
+		pods, err := getRunningFirehosePods(s.ctx, s.kubeProvider, testbench.TestClusterName, testbench.TestNamespace, map[string]string{}, 90*time.Second)
+		s.Require().NoError(err)
+		s.Require().Equal(1, len(pods))
+
+		createdFirehose, err := s.resourceClient.GetResource(s.ctx, &entropyv1beta1.GetResourceRequest{
+			Urn: resp.GetResource().Urn,
+		})
+		s.Require().NoError(err)
+		s.Require().NotNil(createdFirehose)
+		s.Require().Equal(resource.StatusCompleted, createdFirehose.Resource.State.Status.String())
+	})
+
+	s.Run("running worker with project-scoped config will run worker(s) that won't take jobs for non-configured projects", func() {
+		resourceConfig, err := getFirehoseResourceRequest()
+		s.Require().NoError(err)
+
+		resourceConfig.Project = s.resources[1].Project
+		resourceConfig.Spec.Dependencies = []*entropyv1beta1.ResourceDependency{
+			{
+				Key:   "kube_cluster",
+				Value: s.resources[1].Urn,
+			},
+		}
+
+		resp, err := s.resourceClient.CreateResource(s.ctx, &entropyv1beta1.CreateResourceRequest{
+			Resource: resourceConfig,
+		})
+		s.Require().NoError(err)
+
+		pods, err := getRunningFirehosePods(s.ctx, s.kubeProvider, testbench.TestClusterName, testbench.TestNamespace, map[string]string{}, 90*time.Second)
+		s.Require().NoError(err)
+		s.Require().Equal(1, len(pods))
+
+		createdFirehose, err := s.resourceClient.GetResource(s.ctx, &entropyv1beta1.GetResourceRequest{
+			Urn: resp.GetResource().Urn,
+		})
+		s.Require().NoError(err)
+		s.Require().NotNil(createdFirehose)
+		s.Require().Equal(resource.StatusPending, createdFirehose.Resource.State.Status.String())
+	})
+}
+
+func (s *WorkerTestSuite) TearDownTest() {
+	if err := s.pool.Purge(s.resource); err != nil {
+		s.T().Fatal(err)
+	}
+
+	if err := s.kubeProvider.Delete(testbench.TestClusterName, ""); err != nil {
+		s.T().Fatal(err)
+	}
+
+	s.cancel()
+}
+
+func TestWorkerTestSuite(t *testing.T) {
+	suite.Run(t, new(WorkerTestSuite))
+}
diff --git a/test/testbench/bootstrap.go b/test/testbench/bootstrap.go
new file mode 100644
index 00000000..054fdc7b
--- /dev/null
+++ b/test/testbench/bootstrap.go
@@ -0,0 +1,153 @@
+package testbench
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+
+	entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1"
+	"sigs.k8s.io/kind/pkg/cluster"
+)
+
+func BootstrapKubernetesModule(ctx context.Context, client entropyv1beta1.ModuleServiceClient, testDataPath string) error {
+	moduleData, err := os.ReadFile(testDataPath + "/module/kubernetes_module.json")
+	if err != nil {
+		return err
+	}
+
+	var moduleConfig *entropyv1beta1.Module
+	if err = json.Unmarshal(moduleData, &moduleConfig); err != nil {
+		return err
+	}
+
+	project := moduleConfig.Project
+	for i := 0; i < 3; i++ {
+		moduleConfig.Project = fmt.Sprintf("%s-%d", project, i)
+		if _, err := client.CreateModule(ctx, &entropyv1beta1.CreateModuleRequest{
+			Module: moduleConfig,
+		}); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func BootstrapFirehoseModule(ctx context.Context, client entropyv1beta1.ModuleServiceClient, testDataPath string) error {
+	moduleData, err := os.ReadFile(testDataPath + "/module/firehose_module.json")
+	if err != nil {
+		return err
+	}
+
+	var moduleConfig *entropyv1beta1.Module
+	if err = json.Unmarshal(moduleData, &moduleConfig); err != nil {
+		return err
+	}
+
+	project := moduleConfig.Project
+	for i := 0; i < 3; i++ {
+		moduleConfig.Project = fmt.Sprintf("%s-%d", project, i)
+
+		if _, err := client.CreateModule(ctx, &entropyv1beta1.CreateModuleRequest{
+			Module: moduleConfig,
+		}); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func BootstrapFlinkModule(ctx context.Context, client entropyv1beta1.ModuleServiceClient, testDataPath string) error {
+	moduleData, err := os.ReadFile(testDataPath + "/module/flink_module.json")
+	if err != nil {
+		return err
+	}
+
+	var moduleConfig *entropyv1beta1.Module
+	if err = json.Unmarshal(moduleData, &moduleConfig); err != nil {
+		return err
+	}
+
+	project := moduleConfig.Project
+	for i := 0; i < 3; i++ {
+		moduleConfig.Project = fmt.Sprintf("%s-%d", project, i)
+
+		if _, err := client.CreateModule(ctx, &entropyv1beta1.CreateModuleRequest{
+			Module: moduleConfig,
+		}); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func BootstrapKubernetesResource(ctx context.Context, client entropyv1beta1.ResourceServiceClient, kubeProvider *cluster.Provider, testDataPath string) error {
+	resourceData, err := os.ReadFile(testDataPath + "/resource/kubernetes_resource.json")
+	if err != nil {
+		return err
+	}
+
+	host, clientCertificate, clientKey, err := GetClusterCredentials(kubeProvider, TestClusterName)
+	if err != nil {
+		return err
+	}
+
+	type Config struct {
+		Host              string `json:"host"`
+		Insecure          bool   `json:"insecure"`
+		Timeout           uint   `json:"timeout"`
+		ClientCertificate string `json:"client_certificate"`
+		ClientKey         string `json:"client_key"`
+	}
+
+	type Spec struct {
+		Configs      Config            `json:"configs"`
+		Dependencies map[string]string `json:"dependencies"`
+	}
+
+	type SpecConfig struct {
+		Specs Spec `json:"spec"`
+	}
+
+	specConfig := SpecConfig{
+		Specs: Spec{
+			Configs: Config{
+				Host:              host,
+				Insecure:          true,
+				ClientCertificate: clientCertificate,
+				ClientKey:         clientKey,
+			},
+		},
+	}
+
+	specData, err := json.Marshal(specConfig)
+	if err != nil {
+		return err
+	}
+
+	var resourceConfig *entropyv1beta1.Resource
+	if err = json.Unmarshal(resourceData, &resourceConfig); err != nil {
+		return err
+	}
+
+	// Overlay the kind cluster credentials onto the resource spec loaded from test data.
+	if err = json.Unmarshal(specData, &resourceConfig); err != nil {
+		return err
+	}
+
+	project := resourceConfig.Project
+	for i := 0; i < 3; i++ {
+		resourceConfig.Project = fmt.Sprintf("%s-%d", project, i)
+
+		if _, err := client.CreateResource(ctx, &entropyv1beta1.CreateResourceRequest{
+			Resource: resourceConfig,
+		}); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/test/testbench/test_data/module/firehose_module.json
b/test/testbench/test_data/module/firehose_module.json new file mode 100644 index 00000000..3531eb24 --- /dev/null +++ b/test/testbench/test_data/module/firehose_module.json @@ -0,0 +1,52 @@ +{ + "name": "firehose", + "project": "test-project", + "configs": { + "env_variables": { + "APPLICATION_THREAD_COUNT": "1", + "ERROR_TYPES_FOR_FAILING": "DESERIALIZATION_ERROR,INVALID_MESSAGE_ERROR,UNKNOWN_FIELDS_ERROR", + "ERROR_TYPES_FOR_RETRY": "DEFAULT_ERROR,SINK_RETRYABLE_ERROR", + "FILTER_DATA_SOURCE": "", + "FILTER_ENGINE": "no_op", + "FILTER_ESB_MESSAGE_FORMAT": "", + "FILTER_JEXL_EXPRESSION": "", + "FILTER_JSON_SCHEMA": "", + "FILTER_SCHEMA_PROTO_CLASS": "", + "INPUT_SCHEMA_DATA_TYPE": "protobuf", + "INPUT_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE": "true", + "INPUT_SCHEMA_PROTO_TO_COLUMN_MAPPING": "", + "JAVA_TOOL_OPTIONS": "-javaagent:jolokia-jvm-agent.jar=port=8778,host=localhost", + "LOG_LEVEL": "INFO", + "RETRY_EXPONENTIAL_BACKOFF_INITIAL_MS": "10", + "RETRY_EXPONENTIAL_BACKOFF_MAX_MS": "60000", + "RETRY_EXPONENTIAL_BACKOFF_RATE": "2", + "RETRY_FAIL_AFTER_MAX_ATTEMPTS_ENABLE": "false", + "RETRY_MAX_ATTEMPTS": "2147483647", + "SINK_ADD_METADATA_ENABLED": "false", + "SOURCE_KAFKA_ASYNC_COMMIT_ENABLE": "true", + "SOURCE_KAFKA_COMMIT_ONLY_CURRENT_PARTITIONS_ENABLE": "true", + "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_COMMIT_ENABLE": "false", + "SOURCE_KAFKA_CONSUMER_CONFIG_AUTO_OFFSET_RESET": "latest", + "SOURCE_KAFKA_CONSUMER_CONFIG_FETCH_MIN_BYTES": "1", + "SOURCE_KAFKA_CONSUMER_CONFIG_MANUAL_COMMIT_MIN_INTERVAL_MS": "5000", + "SOURCE_KAFKA_CONSUMER_CONFIG_MAX_POLL_RECORDS": "500", + "SOURCE_KAFKA_CONSUMER_CONFIG_METADATA_MAX_AGE_MS": "500", + "SOURCE_KAFKA_CONSUMER_CONFIG_PARTITION_ASSIGNMENT_STRATEGY": "org.apache.kafka.clients.consumer.CooperativeStickyAssignor", + "SOURCE_KAFKA_CONSUMER_CONFIG_SESSION_TIMEOUT_MS": "10000", + "SOURCE_KAFKA_CONSUMER_MODE": "sync", + "SOURCE_KAFKA_POLL_TIMEOUT_MS": "9223372036854775807", + "_JAVA_OPTIONS": "-Xmx1250m -Xms1250m" + }, + "init_container": { + "enabled": false + }, + "kube_deploy_timeout_seconds": 60, + "labels": { + "application": "firehose" + }, + "offset_reset_delay_seconds": 10, + "telegraf": { + "enabled": false + } + } +} \ No newline at end of file diff --git a/test/testbench/test_data/module/flink_module.json b/test/testbench/test_data/module/flink_module.json new file mode 100644 index 00000000..7e887305 --- /dev/null +++ b/test/testbench/test_data/module/flink_module.json @@ -0,0 +1,5 @@ +{ + "name": "flink", + "project": "test-project", + "configs": {} +} \ No newline at end of file diff --git a/test/testbench/test_data/module/kafka_module.json b/test/testbench/test_data/module/kafka_module.json new file mode 100644 index 00000000..ee00f8d2 --- /dev/null +++ b/test/testbench/test_data/module/kafka_module.json @@ -0,0 +1,5 @@ +{ + "name": "kafka", + "project": "test-project", + "configs": {} +} \ No newline at end of file diff --git a/test/testbench/test_data/module/kubernetes_module.json b/test/testbench/test_data/module/kubernetes_module.json new file mode 100644 index 00000000..01d4dd79 --- /dev/null +++ b/test/testbench/test_data/module/kubernetes_module.json @@ -0,0 +1,5 @@ +{ + "name": "kubernetes", + "project": "test-project", + "configs": {} +} \ No newline at end of file diff --git a/test/testbench/test_data/resource/firehose_resource.json b/test/testbench/test_data/resource/firehose_resource.json new file mode 100644 index 00000000..b51fb4f9 --- /dev/null +++ b/test/testbench/test_data/resource/firehose_resource.json @@ -0,0 +1,29 
@@ +{ + "kind": "firehose", + "name": "test-firehose", + "project": "test-project-0", + "labels": { + "description": "test firehose resource" + }, + "spec": { + "configs": { + "chart_values": { + "image_pull_policy": "IfNotPresent" + }, + "env_variables": { + "SINK_TYPE": "LOG", + "INPUT_SCHEMA_PROTO_CLASS": "com.tests.TestMessage", + "SOURCE_KAFKA_BROKERS": "localhost:9092", + "SOURCE_KAFKA_TOPIC": "test-topic" + }, + "replicas": 1, + "namespace": "default" + }, + "dependencies": [ + { + "key": "kube_cluster", + "value": "orn:entropy:kubernetes:test-project-0:test-kube" + } + ] + } +} \ No newline at end of file diff --git a/test/testbench/test_data/resource/flink_resource.json b/test/testbench/test_data/resource/flink_resource.json new file mode 100644 index 00000000..d68f0a56 --- /dev/null +++ b/test/testbench/test_data/resource/flink_resource.json @@ -0,0 +1,25 @@ +{ + "kind": "flink", + "name": "test-flink", + "project": "test-project-0", + "labels": { + "description": "test flink resource" + }, + "spec": { + "configs": { + "influx": { + "password": "influx-password", + "url": "localhost:1234", + "username": "influx-user" + }, + "kube_namespace": "flink-ns", + "sink_kafka_stream": "flinkstream" + }, + "dependencies": [ + { + "key": "kube_cluster", + "value": "orn:entropy:kubernetes:test-project-0:test-kube" + } + ] + } +} \ No newline at end of file diff --git a/test/testbench/test_data/resource/kafka_resource.json b/test/testbench/test_data/resource/kafka_resource.json new file mode 100644 index 00000000..3a3672a5 --- /dev/null +++ b/test/testbench/test_data/resource/kafka_resource.json @@ -0,0 +1,25 @@ +{ + "kind": "kafka", + "name": "test-kafka", + "project": "test-project", + "labels": { + "description": "test kafka resource" + }, + "spec": { + "configs": { + "advertise_mode": { + "host": "6667", + "address": "6668" + }, + "brokers": [ + { + "name": "test-project-test-kafka-01", + "host": "test-project-test-kafka-01", + "address": "127.0.0.1" + } + ], + "type": "source" + }, + "dependencies": [] + } +} \ No newline at end of file diff --git a/test/testbench/test_data/resource/kubernetes_resource.json b/test/testbench/test_data/resource/kubernetes_resource.json new file mode 100644 index 00000000..7e3e3d69 --- /dev/null +++ b/test/testbench/test_data/resource/kubernetes_resource.json @@ -0,0 +1,8 @@ +{ + "kind": "kubernetes", + "name": "test-kube", + "project": "test-project", + "labels": { + "firehose": "true" + } +} \ No newline at end of file diff --git a/test/testbench/testbench.go b/test/testbench/testbench.go new file mode 100644 index 00000000..e71ea50a --- /dev/null +++ b/test/testbench/testbench.go @@ -0,0 +1,253 @@ +package testbench + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net" + "os" + "path/filepath" + "testing" + "time" + + "github.com/google/uuid" + "github.com/goto/entropy/cli" + "github.com/goto/entropy/pkg/logger" + entropyv1beta1 "github.com/goto/entropy/proto/gotocompany/entropy/v1beta1" + "github.com/goto/salt/dockertestx" + "github.com/goto/salt/log" + "github.com/ory/dockertest/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/metadata" + "gopkg.in/yaml.v2" + "sigs.k8s.io/kind/pkg/cluster" +) + +var ( + UserIDHeader = "user-id" + zapLogger = log.NewZap() + TestDataPath = "" + TestClusterName = fmt.Sprintf("test-cluster-%s", uuid.New()) + TestNamespace = "default" +) + +func 
SetupTests(t *testing.T, spawnWorkers bool, setupKube bool) (context.Context, entropyv1beta1.ModuleServiceClient, entropyv1beta1.ResourceServiceClient, *cli.Config,
+	*dockertest.Pool, *dockertest.Resource, *cluster.Provider, func(), func(), func()) {
+	t.Helper()
+
+	servicePort, err := getFreePort()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	pool, err := dockertest.NewPool("")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var provider *cluster.Provider
+	if setupKube {
+		zapLogger.Info("creating cluster")
+		provider = cluster.NewProvider()
+		err = provider.Create(TestClusterName)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	zapLogger.Info("creating postgres")
+	postgres, err := dockertestx.CreatePostgres(dockertestx.PostgresWithDockerPool(pool), dockertestx.PostgresWithDockertestResourceExpiry(3000))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	appConfig := &cli.Config{
+		Service: cli.ServeConfig{
+			Host: "localhost",
+			Port: servicePort,
+		},
+		PGConnStr: postgres.GetExternalConnString(),
+		Log: logger.LogConfig{
+			Level: "info",
+		},
+		Syncer: cli.SyncerConf{
+			SyncInterval:        10 * time.Second,
+			RefreshInterval:     15 * time.Second,
+			ExtendLockBy:        20 * time.Second,
+			SyncBackoffInterval: 15 * time.Second,
+			MaxRetries:          5,
+		},
+	}
+
+	ctx, cancel := context.WithCancel(metadata.NewOutgoingContext(context.Background(), metadata.New(map[string]string{
+		UserIDHeader: "doe@gotocompany.com",
+	})))
+
+	go func() {
+		if err := cli.StartServer(ctx, *appConfig, true, spawnWorkers); err != nil {
+			zapLogger.Warn(err.Error())
+		}
+	}()
+
+	zapLogger.Info("creating client")
+	host := fmt.Sprintf("localhost:%d", appConfig.Service.Port)
+	moduleClient, cancelModuleClient, err := createModuleClient(ctx, host)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resourceClient, cancelResourceClient, err := createResourceClient(ctx, host)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	parent := filepath.Dir(wd)
+	TestDataPath = parent + "/testbench/test_data/"
+
+	err = BootstrapKubernetesModule(ctx, moduleClient, TestDataPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = BootstrapFirehoseModule(ctx, moduleClient, TestDataPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = BootstrapFlinkModule(ctx, moduleClient, TestDataPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if setupKube {
+		err = BootstrapKubernetesResource(ctx, resourceClient, provider, TestDataPath)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	return ctx, moduleClient, resourceClient, appConfig, pool, postgres.GetResource(), provider, cancelModuleClient, cancelResourceClient, cancel
+}
+
+func SetupWorker(t *testing.T, ctx context.Context, appConfig cli.Config) {
+	go func() {
+		if err := cli.StartWorkers(ctx, appConfig); err != nil {
+			zapLogger.Warn(err.Error())
+		}
+	}()
+}
+
+func getFreePort() (int, error) {
+	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+	if err != nil {
+		return 0, err
+	}
+
+	l, err := net.ListenTCP("tcp", addr)
+	if err != nil {
+		return 0, err
+	}
+	defer l.Close()
+	return l.Addr().(*net.TCPAddr).Port, nil
+}
+
+func createResourceClient(ctx context.Context, host string) (entropyv1beta1.ResourceServiceClient, func(), error) {
+	conn, err := createConnection(ctx, host)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cancel := func() {
+		conn.Close()
+	}
+
+	client := entropyv1beta1.NewResourceServiceClient(conn)
+	return client, cancel, nil
+}
+
+func createModuleClient(ctx context.Context, host string) (entropyv1beta1.ModuleServiceClient, func(), error) {
+	conn, err :=
createConnection(ctx, host) + if err != nil { + return nil, nil, err + } + + cancel := func() { + conn.Close() + } + + client := entropyv1beta1.NewModuleServiceClient(conn) + return client, cancel, nil +} + +func createConnection(ctx context.Context, host string) (*grpc.ClientConn, error) { + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithBlock(), + } + + return grpc.DialContext(ctx, host, opts...) +} + +func GetClusterCredentials(kubeProvider *cluster.Provider, clusterName string) (string, string, string, error) { + strConfig, err := kubeProvider.KubeConfig(clusterName, false) + if err != nil { + return "", "", "", err + } + + type Cluster struct { + Cluster struct { + Server string `yaml:"server"` + } `yaml:"cluster"` + } + + type User struct { + User struct { + ClientCertificateData string `yaml:"client-certificate-data"` + ClientKeyData string `yaml:"client-key-data"` + } `yaml:"user"` + } + + type kubeConfig struct { + Users []User `yaml:"users"` + Cluster []Cluster `yaml:"clusters"` + } + + var config kubeConfig + err = yaml.Unmarshal([]byte(strConfig), &config) + if err != nil { + return "", "", "", err + } + + if len(config.Users) == 0 { + return "", "", "", errors.New("error parsing credentials") + } + + clientCertificate, err := decodeBase64(config.Users[0].User.ClientCertificateData) + if err != nil { + return "", "", "", err + } + + clientKey, err := decodeBase64(config.Users[0].User.ClientKeyData) + if err != nil { + return "", "", "", err + } + + host := config.Cluster[0].Cluster.Server + + return host, clientCertificate, clientKey, nil +} + +func decodeBase64(encoded string) (string, error) { + decoded, err := base64.StdEncoding.DecodeString(encoded) + if err != nil { + return "", fmt.Errorf("error decoding base64 certificate: %w", err) + } + return string(decoded), nil +}