diff --git a/.features/pending/configmap-sync-cli.md b/.features/pending/configmap-sync-cli.md new file mode 100644 index 000000000000..4da20d13e27a --- /dev/null +++ b/.features/pending/configmap-sync-cli.md @@ -0,0 +1,6 @@ +Description: Add support for creating a configmap semaphore config using the CLI +Author: [Darko Janjic](https://github.com/djanjic) +Component: CLI +Issues: 14671 + +Allow users to create a configmap semaphore configuration using the CLI diff --git a/.gitignore b/.gitignore index ffe23a96030f..6f5f7872360c 100644 --- a/.gitignore +++ b/.gitignore @@ -35,6 +35,7 @@ git-ask-pass.sh /pkg/apiclient/workflow/workflow.swagger.json /pkg/apiclient/workflowarchive/workflow-archive.swagger.json /pkg/apiclient/workflowtemplate/workflow-template.swagger.json +/pkg/apiclient/sync/sync.swagger.json /site/ /.brew_home /go-diagrams/ diff --git a/Makefile b/Makefile index bf714e9ad9a4..974ddd16b7c5 100644 --- a/Makefile +++ b/Makefile @@ -203,7 +203,8 @@ SWAGGER_FILES := pkg/apiclient/_.primary.swagger.json \ pkg/apiclient/sensor/sensor.swagger.json \ pkg/apiclient/workflow/workflow.swagger.json \ pkg/apiclient/workflowarchive/workflow-archive.swagger.json \ - pkg/apiclient/workflowtemplate/workflow-template.swagger.json + pkg/apiclient/workflowtemplate/workflow-template.swagger.json \ + pkg/apiclient/sync/sync.swagger.json PROTO_BINARIES := $(TOOL_PROTOC_GEN_GOGO) $(TOOL_PROTOC_GEN_GOGOFAST) $(TOOL_GOIMPORTS) $(TOOL_PROTOC_GEN_GRPC_GATEWAY) $(TOOL_PROTOC_GEN_SWAGGER) $(TOOL_CLANG_FORMAT) GENERATED_DOCS := docs/fields.md docs/cli/argo.md docs/workflow-controller-configmap.md @@ -359,6 +360,7 @@ swagger: \ pkg/apiclient/workflow/workflow.swagger.json \ pkg/apiclient/workflowarchive/workflow-archive.swagger.json \ pkg/apiclient/workflowtemplate/workflow-template.swagger.json \ + pkg/apiclient/sync/sync.swagger.json \ manifests/base/crds/full/argoproj.io_workflows.yaml \ manifests \ api/openapi-spec/swagger.json \ @@ -483,6 +485,9 @@ 
pkg/apiclient/workflowarchive/workflow-archive.swagger.json: $(PROTO_BINARIES) $ pkg/apiclient/workflowtemplate/workflow-template.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflowtemplate/workflow-template.proto $(call protoc,pkg/apiclient/workflowtemplate/workflow-template.proto) +pkg/apiclient/sync/sync.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/sync/sync.proto + $(call protoc,pkg/apiclient/sync/sync.proto) + # generate other files for other CRDs manifests/base/crds/full/argoproj.io_workflows.yaml: $(TOOL_CONTROLLER_GEN) $(TYPES) ./hack/manifests/crdgen.sh ./hack/manifests/crds.go ./hack/manifests/crdgen.sh diff --git a/api/jsonschema/schema.json b/api/jsonschema/schema.json index 8b37f64b8af2..e15a29914270 100644 --- a/api/jsonschema/schema.json +++ b/api/jsonschema/schema.json @@ -11693,6 +11693,77 @@ } }, "type": "object" + }, + "sync.CreateSyncLimitRequest": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "sizeLimit": { + "type": "integer" + }, + "type": { + "$ref": "#/definitions/sync.SyncConfigType" + } + }, + "type": "object" + }, + "sync.DeleteSyncLimitResponse": { + "type": "object" + }, + "sync.SyncConfigType": { + "default": "CONFIG_MAP", + "enum": [ + "CONFIG_MAP", + "DATABASE" + ], + "type": "string" + }, + "sync.SyncLimitResponse": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "sizeLimit": { + "type": "integer" + }, + "type": { + "$ref": "#/definitions/sync.SyncConfigType" + } + }, + "type": "object" + }, + "sync.UpdateSyncLimitRequest": { + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "sizeLimit": { + "type": "integer" + }, + "type": { + "$ref": "#/definitions/sync.SyncConfigType" + } + }, + "type": "object" } }, "oneOf": [ diff --git a/api/openapi-spec/swagger.json 
b/api/openapi-spec/swagger.json index 34ba15daf528..3366d4cc8814 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -2318,6 +2318,186 @@ } } }, + "/api/v1/sync/{namespace}": { + "post": { + "tags": [ + "SyncService" + ], + "operationId": "SyncService_CreateSyncLimit", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/sync.CreateSyncLimitRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sync.SyncLimitResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + }, + "/api/v1/sync/{namespace}/{key}": { + "get": { + "tags": [ + "SyncService" + ], + "operationId": "SyncService_GetSyncLimit", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "key", + "in": "path", + "required": true + }, + { + "enum": [ + "CONFIG_MAP", + "DATABASE" + ], + "type": "string", + "default": "CONFIG_MAP", + "name": "type", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sync.SyncLimitResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + }, + "put": { + "tags": [ + "SyncService" + ], + "operationId": "SyncService_UpdateSyncLimit", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "key", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + 
"$ref": "#/definitions/sync.UpdateSyncLimitRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sync.SyncLimitResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + }, + "delete": { + "tags": [ + "SyncService" + ], + "operationId": "SyncService_DeleteSyncLimit", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "key", + "in": "path", + "required": true + }, + { + "enum": [ + "CONFIG_MAP", + "DATABASE" + ], + "type": "string", + "default": "CONFIG_MAP", + "name": "type", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sync.DeleteSyncLimitResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/grpc.gateway.runtime.Error" + } + } + } + } + }, "/api/v1/tracking/event": { "post": { "tags": [ @@ -15766,6 +15946,77 @@ "$ref": "#/definitions/github.amrom.workers.dev.argoproj.argo_events.pkg.apis.events.v1alpha1.Sensor" } } + }, + "sync.CreateSyncLimitRequest": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "sizeLimit": { + "type": "integer" + }, + "type": { + "$ref": "#/definitions/sync.SyncConfigType" + } + } + }, + "sync.DeleteSyncLimitResponse": { + "type": "object" + }, + "sync.SyncConfigType": { + "type": "string", + "default": "CONFIG_MAP", + "enum": [ + "CONFIG_MAP", + "DATABASE" + ] + }, + "sync.SyncLimitResponse": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"sizeLimit": { + "type": "integer" + }, + "type": { + "$ref": "#/definitions/sync.SyncConfigType" + } + } + }, + "sync.UpdateSyncLimitRequest": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "sizeLimit": { + "type": "integer" + }, + "type": { + "$ref": "#/definitions/sync.SyncConfigType" + } + } } }, "securityDefinitions": { diff --git a/cmd/argo/commands/root.go b/cmd/argo/commands/root.go index 4f966e64798d..b7c62cb420bd 100644 --- a/cmd/argo/commands/root.go +++ b/cmd/argo/commands/root.go @@ -16,7 +16,9 @@ import ( "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/clustertemplate" "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/cron" "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/executorplugin" + "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/sync" "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/template" + cmdutil "github.com/argoproj/argo-workflows/v3/util/cmd" grpcutil "github.com/argoproj/argo-workflows/v3/util/grpc" "github.com/argoproj/argo-workflows/v3/util/logging" @@ -118,6 +120,7 @@ If your server is behind an ingress with a path (running "argo server --base-hre command.AddCommand(cron.NewCronWorkflowCommand()) command.AddCommand(clustertemplate.NewClusterTemplateCommand()) command.AddCommand(executorplugin.NewRootCommand()) + command.AddCommand(sync.NewSyncCommand()) client.AddKubectlFlagsToCmd(command) client.AddAPIClientFlagsToCmd(command) diff --git a/cmd/argo/commands/sync/configmap/create.go b/cmd/argo/commands/sync/configmap/create.go new file mode 100644 index 000000000000..b91b7f9c9468 --- /dev/null +++ b/cmd/argo/commands/sync/configmap/create.go @@ -0,0 +1,72 @@ +package sync + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + 
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" + "github.com/argoproj/argo-workflows/v3/util/errors" +) + +type cliCreateOpts struct { + key string // --key + sizeLimit int32 // --size-limit +} + +func NewCreateCommand() *cobra.Command { + + var cliCreateOpts = cliCreateOpts{} + + command := &cobra.Command{ + Use: "create", + Short: "Create a configmap sync limit", + Args: cobra.ExactArgs(1), + Example: `argo sync configmap create my-cm --key my-key --size-limit 10`, + RunE: func(cmd *cobra.Command, args []string) error { + return CreateSyncLimitCommand(cmd.Context(), args[0], &cliCreateOpts) + }, + } + + command.Flags().StringVar(&cliCreateOpts.key, "key", "", "Key of the sync limit") + command.Flags().Int32Var(&cliCreateOpts.sizeLimit, "size-limit", 0, "Size limit of the sync limit") + + ctx := command.Context() + err := command.MarkFlagRequired("key") + errors.CheckError(ctx, err) + + err = command.MarkFlagRequired("size-limit") + errors.CheckError(ctx, err) + + return command +} + +func CreateSyncLimitCommand(ctx context.Context, cmName string, cliOpts *cliCreateOpts) error { + ctx, apiClient, err := client.NewAPIClient(ctx) + if err != nil { + return err + } + serviceClient, err := apiClient.NewSyncServiceClient() + if err != nil { + return err + } + + req := &syncpkg.CreateSyncLimitRequest{ + Name: cmName, + Namespace: client.Namespace(ctx), + Key: cliOpts.key, + SizeLimit: cliOpts.sizeLimit, + Type: syncpkg.SyncConfigType_CONFIG_MAP, + } + + resp, err := serviceClient.CreateSyncLimit(ctx, req) + if err != nil { + return fmt.Errorf("failed to create sync limit: %v", err) + } + + fmt.Printf("Configmap sync limit created: %s/%s with key %s and size limit %d\n", resp.Namespace, resp.Name, resp.Key, resp.SizeLimit) + + return nil +} diff --git a/cmd/argo/commands/sync/configmap/delete.go b/cmd/argo/commands/sync/configmap/delete.go new 
file mode 100644 index 000000000000..d01085a199ae --- /dev/null +++ b/cmd/argo/commands/sync/configmap/delete.go @@ -0,0 +1,63 @@ +package sync + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" + "github.com/argoproj/argo-workflows/v3/util/errors" +) + +type cliDeleteOpts struct { + key string // --key +} + +func NewDeleteCommand() *cobra.Command { + var cliDeleteOpts = cliDeleteOpts{} + + command := &cobra.Command{ + Use: "delete", + Short: "Delete a configmap sync limit", + Args: cobra.ExactArgs(1), + Example: `argo sync configmap delete my-cm --key my-key`, + RunE: func(cmd *cobra.Command, args []string) error { + return DeleteSyncLimitCommand(cmd.Context(), args[0], &cliDeleteOpts) + }, + } + + command.Flags().StringVar(&cliDeleteOpts.key, "key", "", "Key of the sync limit") + + err := command.MarkFlagRequired("key") + errors.CheckError(command.Context(), err) + + return command +} + +func DeleteSyncLimitCommand(ctx context.Context, cmName string, cliDeleteOpts *cliDeleteOpts) error { + ctx, apiClient, err := client.NewAPIClient(ctx) + if err != nil { + return err + } + serviceClient, err := apiClient.NewSyncServiceClient() + if err != nil { + return err + } + + namespace := client.Namespace(ctx) + req := &syncpkg.DeleteSyncLimitRequest{ + Name: cmName, + Namespace: namespace, + Key: cliDeleteOpts.key, + Type: syncpkg.SyncConfigType_CONFIG_MAP, + } + + if _, err := serviceClient.DeleteSyncLimit(ctx, req); err != nil { + return fmt.Errorf("failed to delete sync limit: %v", err) + } + + fmt.Printf("Deleted sync limit for ConfigMap %s from %s namespace with key %s\n", cmName, namespace, cliDeleteOpts.key) + return nil +} diff --git a/cmd/argo/commands/sync/configmap/get.go b/cmd/argo/commands/sync/configmap/get.go new file mode 100644 index 
000000000000..fbef4c21e8b9 --- /dev/null +++ b/cmd/argo/commands/sync/configmap/get.go @@ -0,0 +1,62 @@ +package sync + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" + "github.com/argoproj/argo-workflows/v3/util/errors" +) + +type cliGetOpts struct { + key string // --key +} + +func NewGetCommand() *cobra.Command { + var cliGetOpts = cliGetOpts{} + command := &cobra.Command{ + Use: "get", + Short: "Get a configmap sync limit", + Args: cobra.ExactArgs(1), + Example: `argo sync configmap get my-cm --key my-key`, + RunE: func(cmd *cobra.Command, args []string) error { + return GetSyncLimitCommand(cmd.Context(), args[0], &cliGetOpts) + }, + } + + command.Flags().StringVar(&cliGetOpts.key, "key", "", "Key of the sync limit") + + err := command.MarkFlagRequired("key") + errors.CheckError(command.Context(), err) + + return command +} + +func GetSyncLimitCommand(ctx context.Context, cmName string, cliGetOpts *cliGetOpts) error { + ctx, apiClient, err := client.NewAPIClient(ctx) + if err != nil { + return err + } + serviceClient, err := apiClient.NewSyncServiceClient() + if err != nil { + return err + } + + req := &syncpkg.GetSyncLimitRequest{ + Name: cmName, + Namespace: client.Namespace(ctx), + Key: cliGetOpts.key, + Type: syncpkg.SyncConfigType_CONFIG_MAP, + } + + resp, err := serviceClient.GetSyncLimit(ctx, req) + if err != nil { + return fmt.Errorf("failed to get sync limit: %v", err) + } + + fmt.Printf("Sync Configmap name: %s\nNamespace: %s\nKey: %s\nSize Limit: %d\n", resp.Name, resp.Namespace, resp.Key, resp.SizeLimit) + return nil +} diff --git a/cmd/argo/commands/sync/configmap/root.go b/cmd/argo/commands/sync/configmap/root.go new file mode 100644 index 000000000000..22c59caae2ec --- /dev/null +++ b/cmd/argo/commands/sync/configmap/root.go @@ -0,0 +1,23 @@ 
+package sync + +import ( + "github.com/spf13/cobra" +) + +func NewConfigmapCommand() *cobra.Command { + command := &cobra.Command{ + Use: "configmap", + Aliases: []string{"cm"}, + Short: "manage configmap sync limits", + RunE: func(cmd *cobra.Command, args []string) error { + return cmd.Help() + }, + } + + command.AddCommand(NewCreateCommand()) + command.AddCommand(NewGetCommand()) + command.AddCommand(NewDeleteCommand()) + command.AddCommand(NewUpdateCommand()) + + return command +} diff --git a/cmd/argo/commands/sync/configmap/update.go b/cmd/argo/commands/sync/configmap/update.go new file mode 100644 index 000000000000..75ed341fccb3 --- /dev/null +++ b/cmd/argo/commands/sync/configmap/update.go @@ -0,0 +1,70 @@ +package sync + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" + "github.com/argoproj/argo-workflows/v3/util/errors" +) + +type cliUpdateOpts struct { + key string // --key + sizeLimit int32 // --size-limit +} + +func NewUpdateCommand() *cobra.Command { + var cliUpdateOpts = cliUpdateOpts{} + + command := &cobra.Command{ + Use: "update", + Short: "Update a configmap sync limit", + Args: cobra.ExactArgs(1), + Example: `argo sync configmap update my-cm --key my-key --size-limit 20`, + RunE: func(cmd *cobra.Command, args []string) error { + return UpdateSyncLimitCommand(cmd.Context(), args[0], &cliUpdateOpts) + }, + } + + command.Flags().StringVar(&cliUpdateOpts.key, "key", "", "Key of the sync limit") + command.Flags().Int32Var(&cliUpdateOpts.sizeLimit, "size-limit", 0, "Size limit of the sync limit") + + ctx := command.Context() + err := command.MarkFlagRequired("key") + errors.CheckError(ctx, err) + + err = command.MarkFlagRequired("size-limit") + errors.CheckError(ctx, err) + + return command +} + +func UpdateSyncLimitCommand(ctx 
context.Context, cmName string, cliOpts *cliUpdateOpts) error { + ctx, apiClient, err := client.NewAPIClient(ctx) + if err != nil { + return err + } + serviceClient, err := apiClient.NewSyncServiceClient() + if err != nil { + return err + } + + req := &syncpkg.UpdateSyncLimitRequest{ + Name: cmName, + Namespace: client.Namespace(ctx), + Key: cliOpts.key, + SizeLimit: cliOpts.sizeLimit, + Type: syncpkg.SyncConfigType_CONFIG_MAP, + } + + resp, err := serviceClient.UpdateSyncLimit(ctx, req) + if err != nil { + return fmt.Errorf("failed to update sync limit: %v", err) + } + + fmt.Printf("Updated sync limit for ConfigMap %s from namespace %s with key %s to size limit %d\n", resp.Name, resp.Namespace, resp.Key, resp.SizeLimit) + return nil +} diff --git a/cmd/argo/commands/sync/root.go b/cmd/argo/commands/sync/root.go new file mode 100644 index 000000000000..a2441bff05de --- /dev/null +++ b/cmd/argo/commands/sync/root.go @@ -0,0 +1,21 @@ +package sync + +import ( + "github.com/spf13/cobra" + + configmap "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/sync/configmap" +) + +func NewSyncCommand() *cobra.Command { + command := &cobra.Command{ + Use: "sync", + Short: "manage sync limits", + RunE: func(cmd *cobra.Command, args []string) error { + return cmd.Help() + }, + } + + command.AddCommand(configmap.NewConfigmapCommand()) + + return command +} diff --git a/docs/cli/argo.md b/docs/cli/argo.md index 7a91259cf92c..9026f4633590 100644 --- a/docs/cli/argo.md +++ b/docs/cli/argo.md @@ -125,6 +125,7 @@ argo [flags] * [argo stop](argo_stop.md) - stop zero or more workflows allowing all exit handlers to run * [argo submit](argo_submit.md) - submit a workflow * [argo suspend](argo_suspend.md) - suspend zero or more workflows (opposite of resume) +* [argo sync](argo_sync.md) - manage sync limits * [argo template](argo_template.md) - manipulate workflow templates * [argo terminate](argo_terminate.md) - terminate zero or more workflows immediately 
* [argo version](argo_version.md) - print version information diff --git a/docs/cli/argo_sync.md b/docs/cli/argo_sync.md new file mode 100644 index 000000000000..d3e0d99a9717 --- /dev/null +++ b/docs/cli/argo_sync.md @@ -0,0 +1,55 @@ +## argo sync + +manage sync limits + +``` +argo sync [flags] +``` + +### Options + +``` + -h, --help help for sync +``` + +### Options inherited from parent commands + +``` + --argo-base-href string Path to use with HTTP client due to Base HREF. Defaults to the ARGO_BASE_HREF environment variable. + --argo-http1 If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable. + -s, --argo-server host:port API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable. + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --gloglevel int Set the glog logging level + -H, --header strings Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true. + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + -k, --insecure-skip-verify If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. 
Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable. + --instanceid string submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable. + --kubeconfig string Path to a kube config. Only required if out-of-cluster + --log-format string The formatter to use for logs. One of: text|json (default "text") + --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true) + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server + -v, --verbose Enabled verbose logging, i.e. 
--loglevel debug +``` + +### SEE ALSO + +* [argo](argo.md) - argo is the command line interface to Argo +* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits + diff --git a/docs/cli/argo_sync_configmap.md b/docs/cli/argo_sync_configmap.md new file mode 100644 index 000000000000..9ca9e7ceb5b3 --- /dev/null +++ b/docs/cli/argo_sync_configmap.md @@ -0,0 +1,58 @@ +## argo sync configmap + +manage configmap sync limits + +``` +argo sync configmap [flags] +``` + +### Options + +``` + -h, --help help for configmap +``` + +### Options inherited from parent commands + +``` + --argo-base-href string Path to use with HTTP client due to Base HREF. Defaults to the ARGO_BASE_HREF environment variable. + --argo-http1 If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable. + -s, --argo-server host:port API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable. + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. + --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --gloglevel int Set the glog logging level + -H, --header strings Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true. + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + -k, --insecure-skip-verify If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable. + --instanceid string submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable. + --kubeconfig string Path to a kube config. Only required if out-of-cluster + --log-format string The formatter to use for logs. One of: text|json (default "text") + --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true) + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server + -v, --verbose Enabled verbose logging, i.e. 
--loglevel debug +``` + +### SEE ALSO + +* [argo sync](argo_sync.md) - manage sync limits +* [argo sync configmap create](argo_sync_configmap_create.md) - Create a configmap sync limit +* [argo sync configmap delete](argo_sync_configmap_delete.md) - Delete a configmap sync limit +* [argo sync configmap get](argo_sync_configmap_get.md) - Get a configmap sync limit +* [argo sync configmap update](argo_sync_configmap_update.md) - Update a configmap sync limit + diff --git a/docs/cli/argo_sync_configmap_create.md b/docs/cli/argo_sync_configmap_create.md new file mode 100644 index 000000000000..185b5e2ef61a --- /dev/null +++ b/docs/cli/argo_sync_configmap_create.md @@ -0,0 +1,62 @@ +## argo sync configmap create + +Create a configmap sync limit + +``` +argo sync configmap create [flags] +``` + +### Examples + +``` +argo sync configmap create my-cm --key my-key --size-limit 10 +``` + +### Options + +``` + -h, --help help for create + --key string Key of the sync limit + --size-limit int32 Size limit of the sync limit +``` + +### Options inherited from parent commands + +``` + --argo-base-href string Path to use with HTTP client due to Base HREF. Defaults to the ARGO_BASE_HREF environment variable. + --argo-http1 If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable. + -s, --argo-server host:port API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable. + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
+ --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --gloglevel int Set the glog logging level + -H, --header strings Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true. + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + -k, --insecure-skip-verify If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable. + --instanceid string submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable. + --kubeconfig string Path to a kube config. Only required if out-of-cluster + --log-format string The formatter to use for logs. One of: text|json (default "text") + --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true) + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server + -v, --verbose Enabled verbose logging, i.e. --loglevel debug +``` + +### SEE ALSO + +* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits + diff --git a/docs/cli/argo_sync_configmap_delete.md b/docs/cli/argo_sync_configmap_delete.md new file mode 100644 index 000000000000..8b24809ffadd --- /dev/null +++ b/docs/cli/argo_sync_configmap_delete.md @@ -0,0 +1,61 @@ +## argo sync configmap delete + +Delete a configmap sync limit + +``` +argo sync configmap delete [flags] +``` + +### Examples + +``` +argo sync configmap delete my-cm --key my-key +``` + +### Options + +``` + -h, --help help for delete + --key string Key of the sync limit +``` + +### Options inherited from parent commands + +``` + --argo-base-href string Path to use with HTTP client due to Base HREF. Defaults to the ARGO_BASE_HREF environment variable. + --argo-http1 If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable. + -s, --argo-server host:port API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable. + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
+ --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --gloglevel int Set the glog logging level + -H, --header strings Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true. + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + -k, --insecure-skip-verify If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable. + --instanceid string submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable. + --kubeconfig string Path to a kube config. Only required if out-of-cluster + --log-format string The formatter to use for logs. One of: text|json (default "text") + --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true) + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server + -v, --verbose Enabled verbose logging, i.e. --loglevel debug +``` + +### SEE ALSO + +* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits + diff --git a/docs/cli/argo_sync_configmap_get.md b/docs/cli/argo_sync_configmap_get.md new file mode 100644 index 000000000000..be7666b0a10b --- /dev/null +++ b/docs/cli/argo_sync_configmap_get.md @@ -0,0 +1,61 @@ +## argo sync configmap get + +Get a configmap sync limit + +``` +argo sync configmap get [flags] +``` + +### Examples + +``` +argo sync configmap get my-cm --key my-key +``` + +### Options + +``` + -h, --help help for get + --key string Key of the sync limit +``` + +### Options inherited from parent commands + +``` + --argo-base-href string Path to use with HTTP client due to Base HREF. Defaults to the ARGO_BASE_HREF environment variable. + --argo-http1 If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable. + -s, --argo-server host:port API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable. + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
+ --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --gloglevel int Set the glog logging level + -H, --header strings Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true. + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + -k, --insecure-skip-verify If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable. + --instanceid string submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable. + --kubeconfig string Path to a kube config. Only required if out-of-cluster + --log-format string The formatter to use for logs. One of: text|json (default "text") + --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true) + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server + -v, --verbose Enabled verbose logging, i.e. --loglevel debug +``` + +### SEE ALSO + +* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits + diff --git a/docs/cli/argo_sync_configmap_update.md b/docs/cli/argo_sync_configmap_update.md new file mode 100644 index 000000000000..d7370ccab3d7 --- /dev/null +++ b/docs/cli/argo_sync_configmap_update.md @@ -0,0 +1,62 @@ +## argo sync configmap update + +Update a configmap sync limit + +``` +argo sync configmap update [flags] +``` + +### Examples + +``` +argo sync configmap update my-cm --key my-key --size-limit 20 +``` + +### Options + +``` + -h, --help help for update + --key string Key of the sync limit + --size-limit int32 Size limit of the sync limit +``` + +### Options inherited from parent commands + +``` + --argo-base-href string Path to use with HTTP client due to Base HREF. Defaults to the ARGO_BASE_HREF environment variable. + --argo-http1 If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable. + -s, --argo-server host:port API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable. + --as string Username to impersonate for the operation + --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
+ --as-uid string UID to impersonate for the operation + --certificate-authority string Path to a cert file for the certificate authority + --client-certificate string Path to a client certificate file for TLS + --client-key string Path to a client key file for TLS + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server + --gloglevel int Set the glog logging level + -H, --header strings Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true. + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + -k, --insecure-skip-verify If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable. + --instanceid string submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable. + --kubeconfig string Path to a kube config. Only required if out-of-cluster + --log-format string The formatter to use for logs. One of: text|json (default "text") + --loglevel string Set the logging level. One of: debug|info|warn|error (default "info") + -n, --namespace string If present, the namespace scope for this CLI request + --password string Password for basic authentication to the API server + --proxy-url string If provided, this URL will be used to connect via proxy + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true) + --server string The address and port of the Kubernetes API server + --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used. + --token string Bearer token for authentication to the API server + --user string The name of the kubeconfig user to use + --username string Username for basic authentication to the API server + -v, --verbose Enabled verbose logging, i.e. --loglevel debug +``` + +### SEE ALSO + +* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits + diff --git a/manifests/cluster-install-no-crds/argo-server-rbac/argo-server-clusterole.yaml b/manifests/cluster-install-no-crds/argo-server-rbac/argo-server-clusterole.yaml index 0882c9a7b9c9..e6206590464c 100644 --- a/manifests/cluster-install-no-crds/argo-server-rbac/argo-server-clusterole.yaml +++ b/manifests/cluster-install-no-crds/argo-server-rbac/argo-server-clusterole.yaml @@ -11,6 +11,8 @@ rules: - get - watch - list + - create + - update - apiGroups: - "" resources: diff --git a/manifests/namespace-install/argo-server-rbac/argo-server-role.yaml b/manifests/namespace-install/argo-server-rbac/argo-server-role.yaml index 314177a3ef8e..fbd9b1f5bddd 100644 --- a/manifests/namespace-install/argo-server-rbac/argo-server-role.yaml +++ b/manifests/namespace-install/argo-server-rbac/argo-server-role.yaml @@ -11,6 +11,8 @@ rules: - get - watch - list + - create + - update - apiGroups: - "" resources: diff --git a/manifests/quick-start-minimal.yaml b/manifests/quick-start-minimal.yaml index d0435b453d28..5ef470cb28a8 100644 --- a/manifests/quick-start-minimal.yaml +++ b/manifests/quick-start-minimal.yaml @@ -5204,6 +5204,8 @@ rules: - get - watch - list + - create + - update - apiGroups: - "" resources: diff --git 
a/manifests/quick-start-mysql.yaml b/manifests/quick-start-mysql.yaml index 014e226dce9a..1eec707c19ae 100644 --- a/manifests/quick-start-mysql.yaml +++ b/manifests/quick-start-mysql.yaml @@ -5204,6 +5204,8 @@ rules: - get - watch - list + - create + - update - apiGroups: - "" resources: diff --git a/manifests/quick-start-postgres.yaml b/manifests/quick-start-postgres.yaml index e9f68f5fd9a7..1d737b873e42 100644 --- a/manifests/quick-start-postgres.yaml +++ b/manifests/quick-start-postgres.yaml @@ -5204,6 +5204,8 @@ rules: - get - watch - list + - create + - update - apiGroups: - "" resources: diff --git a/mkdocs.yml b/mkdocs.yml index 4fe736054d69..450baeba6bbf 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -228,6 +228,12 @@ nav: - argo stop: cli/argo_stop.md - argo submit: cli/argo_submit.md - argo suspend: cli/argo_suspend.md + - argo sync: cli/argo_sync.md + - argo sync configmap: cli/argo_sync_configmap.md + - argo sync configmap create: cli/argo_sync_configmap_create.md + - argo sync configmap delete: cli/argo_sync_configmap_delete.md + - argo sync configmap get: cli/argo_sync_configmap_get.md + - argo sync configmap update: cli/argo_sync_configmap_update.md - argo template: cli/argo_template.md - argo template create: cli/argo_template_create.md - argo template delete: cli/argo_template_delete.md diff --git a/pkg/apiclient/apiclient.go b/pkg/apiclient/apiclient.go index b0bce31ca34a..1092992bbea7 100644 --- a/pkg/apiclient/apiclient.go +++ b/pkg/apiclient/apiclient.go @@ -9,6 +9,7 @@ import ( clusterworkflowtmplpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate" cronworkflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow" infopkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/info" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" 
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive" workflowtemplatepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowtemplate" @@ -23,6 +24,7 @@ type Client interface { NewWorkflowTemplateServiceClient() (workflowtemplatepkg.WorkflowTemplateServiceClient, error) NewClusterWorkflowTemplateServiceClient() (clusterworkflowtmplpkg.ClusterWorkflowTemplateServiceClient, error) NewInfoServiceClient() (infopkg.InfoServiceClient, error) + NewSyncServiceClient() (syncpkg.SyncServiceClient, error) } type Opts struct { diff --git a/pkg/apiclient/argo-kube-client.go b/pkg/apiclient/argo-kube-client.go index 8fe3b13903c9..4b8df41d73c2 100644 --- a/pkg/apiclient/argo-kube-client.go +++ b/pkg/apiclient/argo-kube-client.go @@ -15,6 +15,7 @@ import ( "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate" "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow" infopkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/info" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive" "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowtemplate" @@ -22,6 +23,7 @@ import ( "github.com/argoproj/argo-workflows/v3/server/auth" clusterworkflowtmplserver "github.com/argoproj/argo-workflows/v3/server/clusterworkflowtemplate" cronworkflowserver "github.com/argoproj/argo-workflows/v3/server/cronworkflow" + syncserver "github.com/argoproj/argo-workflows/v3/server/sync" "github.com/argoproj/argo-workflows/v3/server/types" workflowserver "github.com/argoproj/argo-workflows/v3/server/workflow" 
"github.com/argoproj/argo-workflows/v3/server/workflow/store" @@ -201,3 +203,7 @@ func (a *argoKubeClient) NewInfoServiceClient() (infopkg.InfoServiceClient, erro func (a *argoKubeClient) NewClusterWorkflowTemplateServiceClient() (clusterworkflowtemplate.ClusterWorkflowTemplateServiceClient, error) { return &errorTranslatingWorkflowClusterTemplateServiceClient{&argoKubeWorkflowClusterTemplateServiceClient{clusterworkflowtmplserver.NewClusterWorkflowTemplateServer(a.instanceIDService, a.cwfTmplStore, nil)}}, nil } + +func (a *argoKubeClient) NewSyncServiceClient() (syncpkg.SyncServiceClient, error) { + return &errorTranslatingArgoKubeSyncServiceClient{&argoKubeSyncServiceClient{syncserver.NewSyncServer()}}, nil +} diff --git a/pkg/apiclient/argo-kube-sync-service-client.go b/pkg/apiclient/argo-kube-sync-service-client.go new file mode 100644 index 000000000000..7204795ae1ab --- /dev/null +++ b/pkg/apiclient/argo-kube-sync-service-client.go @@ -0,0 +1,31 @@ +package apiclient + +import ( + "context" + + "google.golang.org/grpc" + + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" +) + +type argoKubeSyncServiceClient struct { + delegate syncpkg.SyncServiceServer +} + +var _ syncpkg.SyncServiceClient = &argoKubeSyncServiceClient{} + +func (a *argoKubeSyncServiceClient) CreateSyncLimit(ctx context.Context, in *syncpkg.CreateSyncLimitRequest, opts ...grpc.CallOption) (*syncpkg.SyncLimitResponse, error) { + return a.delegate.CreateSyncLimit(ctx, in) +} + +func (a *argoKubeSyncServiceClient) DeleteSyncLimit(ctx context.Context, in *syncpkg.DeleteSyncLimitRequest, opts ...grpc.CallOption) (*syncpkg.DeleteSyncLimitResponse, error) { + return a.delegate.DeleteSyncLimit(ctx, in) +} + +func (a *argoKubeSyncServiceClient) GetSyncLimit(ctx context.Context, in *syncpkg.GetSyncLimitRequest, opts ...grpc.CallOption) (*syncpkg.SyncLimitResponse, error) { + return a.delegate.GetSyncLimit(ctx, in) +} + +func (a *argoKubeSyncServiceClient) 
UpdateSyncLimit(ctx context.Context, in *syncpkg.UpdateSyncLimitRequest, opts ...grpc.CallOption) (*syncpkg.SyncLimitResponse, error) { + return a.delegate.UpdateSyncLimit(ctx, in) +} diff --git a/pkg/apiclient/argo-server-client.go b/pkg/apiclient/argo-server-client.go index d4d429f60b8a..4468237f819e 100644 --- a/pkg/apiclient/argo-server-client.go +++ b/pkg/apiclient/argo-server-client.go @@ -12,6 +12,7 @@ import ( clusterworkflowtmplpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate" cronworkflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow" infopkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/info" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive" workflowtemplatepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowtemplate" @@ -62,6 +63,10 @@ func (a *argoServerClient) NewInfoServiceClient() (infopkg.InfoServiceClient, er return infopkg.NewInfoServiceClient(a.ClientConn), nil } +func (a *argoServerClient) NewSyncServiceClient() (syncpkg.SyncServiceClient, error) { + return syncpkg.NewSyncServiceClient(a.ClientConn), nil +} + func newClientConn(opts ArgoServerOpts) (*grpc.ClientConn, error) { creds := grpc.WithTransportCredentials(insecure.NewCredentials()) if opts.Secure { diff --git a/pkg/apiclient/error-translating-sync-service-client.go b/pkg/apiclient/error-translating-sync-service-client.go new file mode 100644 index 000000000000..094e61422e34 --- /dev/null +++ b/pkg/apiclient/error-translating-sync-service-client.go @@ -0,0 +1,36 @@ +package apiclient + +import ( + "context" + + "google.golang.org/grpc" + + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" + grpcutil 
"github.com/argoproj/argo-workflows/v3/util/grpc" +) + +type errorTranslatingArgoKubeSyncServiceClient struct { + delegate syncpkg.SyncServiceClient +} + +var _ syncpkg.SyncServiceClient = &errorTranslatingArgoKubeSyncServiceClient{} + +func (e *errorTranslatingArgoKubeSyncServiceClient) CreateSyncLimit(ctx context.Context, in *syncpkg.CreateSyncLimitRequest, opts ...grpc.CallOption) (*syncpkg.SyncLimitResponse, error) { + syncLimit, err := e.delegate.CreateSyncLimit(ctx, in, opts...) + return syncLimit, grpcutil.TranslateError(err) +} + +func (e *errorTranslatingArgoKubeSyncServiceClient) DeleteSyncLimit(ctx context.Context, in *syncpkg.DeleteSyncLimitRequest, opts ...grpc.CallOption) (*syncpkg.DeleteSyncLimitResponse, error) { + deleteResp, err := e.delegate.DeleteSyncLimit(ctx, in, opts...) + return deleteResp, grpcutil.TranslateError(err) +} + +func (e *errorTranslatingArgoKubeSyncServiceClient) GetSyncLimit(ctx context.Context, in *syncpkg.GetSyncLimitRequest, opts ...grpc.CallOption) (*syncpkg.SyncLimitResponse, error) { + syncLimit, err := e.delegate.GetSyncLimit(ctx, in, opts...) + return syncLimit, grpcutil.TranslateError(err) +} + +func (e *errorTranslatingArgoKubeSyncServiceClient) UpdateSyncLimit(ctx context.Context, in *syncpkg.UpdateSyncLimitRequest, opts ...grpc.CallOption) (*syncpkg.SyncLimitResponse, error) { + syncLimit, err := e.delegate.UpdateSyncLimit(ctx, in, opts...) 
+ return syncLimit, grpcutil.TranslateError(err) +} diff --git a/pkg/apiclient/http1-client.go b/pkg/apiclient/http1-client.go index f9e533fec7dd..92943375acca 100644 --- a/pkg/apiclient/http1-client.go +++ b/pkg/apiclient/http1-client.go @@ -8,6 +8,7 @@ import ( cronworkflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow" "github.com/argoproj/argo-workflows/v3/pkg/apiclient/http1" infopkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/info" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive" workflowtemplatepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowtemplate" @@ -41,6 +42,10 @@ func (h httpClient) NewInfoServiceClient() (infopkg.InfoServiceClient, error) { return http1.InfoServiceClient(h), nil } +func (h httpClient) NewSyncServiceClient() (syncpkg.SyncServiceClient, error) { + return http1.SyncServiceClient(h), nil +} + func newHTTP1Client(ctx context.Context, baseURL string, auth string, insecureSkipVerify bool, headers []string, customHTTPClient *http.Client) (context.Context, Client, error) { return ctx, httpClient(http1.NewFacade(baseURL, auth, insecureSkipVerify, headers, customHTTPClient)), nil } diff --git a/pkg/apiclient/http1/sync-service-client.go b/pkg/apiclient/http1/sync-service-client.go new file mode 100644 index 000000000000..e1f90f47d15f --- /dev/null +++ b/pkg/apiclient/http1/sync-service-client.go @@ -0,0 +1,31 @@ +package http1 + +import ( + "context" + + "google.golang.org/grpc" + + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" +) + +type SyncServiceClient = Facade + +func (h SyncServiceClient) GetSyncLimit(ctx context.Context, in *syncpkg.GetSyncLimitRequest, _ ...grpc.CallOption) 
(*syncpkg.SyncLimitResponse, error) { + out := &syncpkg.SyncLimitResponse{} + return out, h.Get(ctx, in, out, "/api/v1/sync/{namespace}/{key}") +} + +func (h SyncServiceClient) CreateSyncLimit(ctx context.Context, in *syncpkg.CreateSyncLimitRequest, _ ...grpc.CallOption) (*syncpkg.SyncLimitResponse, error) { + out := &syncpkg.SyncLimitResponse{} + return out, h.Post(ctx, in, out, "/api/v1/sync/{namespace}") +} + +func (h SyncServiceClient) DeleteSyncLimit(ctx context.Context, in *syncpkg.DeleteSyncLimitRequest, _ ...grpc.CallOption) (*syncpkg.DeleteSyncLimitResponse, error) { + out := &syncpkg.DeleteSyncLimitResponse{} + return out, h.Delete(ctx, in, out, "/api/v1/sync/{namespace}/{key}") +} + +func (h SyncServiceClient) UpdateSyncLimit(ctx context.Context, in *syncpkg.UpdateSyncLimitRequest, _ ...grpc.CallOption) (*syncpkg.SyncLimitResponse, error) { + out := &syncpkg.SyncLimitResponse{} + return out, h.Put(ctx, in, out, "/api/v1/sync/{namespace}/{key}") +} diff --git a/pkg/apiclient/offline-client.go b/pkg/apiclient/offline-client.go index 77dcdaa8c790..a9552dce29a0 100644 --- a/pkg/apiclient/offline-client.go +++ b/pkg/apiclient/offline-client.go @@ -7,6 +7,7 @@ import ( "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate" "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow" infopkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/info" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive" "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowtemplate" @@ -135,6 +136,10 @@ func (c *offlineClient) NewInfoServiceClient() (infopkg.InfoServiceClient, error return nil, ErrNoArgoServer } +func (c *offlineClient) NewSyncServiceClient() 
(syncpkg.SyncServiceClient, error) { + return nil, ErrNoArgoServer +} + type offlineWorkflowTemplateNamespacedGetter struct { namespace string workflowTemplates map[string]*wfv1.WorkflowTemplate diff --git a/pkg/apiclient/sync/sync.pb.go b/pkg/apiclient/sync/sync.pb.go new file mode 100644 index 000000000000..e6e74689bc90 --- /dev/null +++ b/pkg/apiclient/sync/sync.pb.go @@ -0,0 +1,2209 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/apiclient/sync/sync.proto + +package sync + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type SyncConfigType int32 + +const ( + SyncConfigType_CONFIG_MAP SyncConfigType = 0 + SyncConfigType_DATABASE SyncConfigType = 1 +) + +var SyncConfigType_name = map[int32]string{ + 0: "CONFIG_MAP", + 1: "DATABASE", +} + +var SyncConfigType_value = map[string]int32{ + "CONFIG_MAP": 0, + "DATABASE": 1, +} + +func (x SyncConfigType) String() string { + return proto.EnumName(SyncConfigType_name, int32(x)) +} + +func (SyncConfigType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_74ab334b2e266b46, []int{0} +} + +type CreateSyncLimitRequest struct { + Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + SizeLimit int32 `protobuf:"varint,5,opt,name=sizeLimit,proto3" json:"sizeLimit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSyncLimitRequest) Reset() { *m = CreateSyncLimitRequest{} } +func (m *CreateSyncLimitRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSyncLimitRequest) ProtoMessage() {} +func (*CreateSyncLimitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_74ab334b2e266b46, []int{0} +} +func (m *CreateSyncLimitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateSyncLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateSyncLimitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CreateSyncLimitRequest) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSyncLimitRequest.Merge(m, src) +} +func (m *CreateSyncLimitRequest) XXX_Size() int { + return m.Size() +} +func (m *CreateSyncLimitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSyncLimitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSyncLimitRequest proto.InternalMessageInfo + +func (m *CreateSyncLimitRequest) GetType() SyncConfigType { + if m != nil { + return m.Type + } + return SyncConfigType_CONFIG_MAP +} + +func (m *CreateSyncLimitRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *CreateSyncLimitRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateSyncLimitRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *CreateSyncLimitRequest) GetSizeLimit() int32 { + if m != nil { + return m.SizeLimit + } + return 0 +} + +type SyncLimitResponse struct { + Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + SizeLimit int32 `protobuf:"varint,5,opt,name=sizeLimit,proto3" json:"sizeLimit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SyncLimitResponse) Reset() { *m = SyncLimitResponse{} } +func (m *SyncLimitResponse) String() string { return proto.CompactTextString(m) } +func (*SyncLimitResponse) ProtoMessage() {} +func (*SyncLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_74ab334b2e266b46, []int{1} +} +func (m *SyncLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SyncLimitResponse) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + if deterministic { + return xxx_messageInfo_SyncLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SyncLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SyncLimitResponse.Merge(m, src) +} +func (m *SyncLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *SyncLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SyncLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SyncLimitResponse proto.InternalMessageInfo + +func (m *SyncLimitResponse) GetType() SyncConfigType { + if m != nil { + return m.Type + } + return SyncConfigType_CONFIG_MAP +} + +func (m *SyncLimitResponse) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *SyncLimitResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SyncLimitResponse) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *SyncLimitResponse) GetSizeLimit() int32 { + if m != nil { + return m.SizeLimit + } + return 0 +} + +type GetSyncLimitRequest struct { + Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSyncLimitRequest) Reset() { *m = GetSyncLimitRequest{} } +func (m *GetSyncLimitRequest) String() string { return proto.CompactTextString(m) } +func (*GetSyncLimitRequest) ProtoMessage() {} +func (*GetSyncLimitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_74ab334b2e266b46, []int{2} +} +func (m 
*GetSyncLimitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSyncLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSyncLimitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSyncLimitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSyncLimitRequest.Merge(m, src) +} +func (m *GetSyncLimitRequest) XXX_Size() int { + return m.Size() +} +func (m *GetSyncLimitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSyncLimitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSyncLimitRequest proto.InternalMessageInfo + +func (m *GetSyncLimitRequest) GetType() SyncConfigType { + if m != nil { + return m.Type + } + return SyncConfigType_CONFIG_MAP +} + +func (m *GetSyncLimitRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *GetSyncLimitRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetSyncLimitRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +type UpdateSyncLimitRequest struct { + Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + SizeLimit int32 `protobuf:"varint,5,opt,name=sizeLimit,proto3" json:"sizeLimit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateSyncLimitRequest) Reset() { *m = UpdateSyncLimitRequest{} } +func (m *UpdateSyncLimitRequest) String() string { return proto.CompactTextString(m) } +func 
(*UpdateSyncLimitRequest) ProtoMessage() {} +func (*UpdateSyncLimitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_74ab334b2e266b46, []int{3} +} +func (m *UpdateSyncLimitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateSyncLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateSyncLimitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateSyncLimitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateSyncLimitRequest.Merge(m, src) +} +func (m *UpdateSyncLimitRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateSyncLimitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateSyncLimitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateSyncLimitRequest proto.InternalMessageInfo + +func (m *UpdateSyncLimitRequest) GetType() SyncConfigType { + if m != nil { + return m.Type + } + return SyncConfigType_CONFIG_MAP +} + +func (m *UpdateSyncLimitRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *UpdateSyncLimitRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateSyncLimitRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *UpdateSyncLimitRequest) GetSizeLimit() int32 { + if m != nil { + return m.SizeLimit + } + return 0 +} + +type DeleteSyncLimitRequest struct { + Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSyncLimitRequest) Reset() { *m = DeleteSyncLimitRequest{} } +func (m *DeleteSyncLimitRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSyncLimitRequest) ProtoMessage() {} +func (*DeleteSyncLimitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_74ab334b2e266b46, []int{4} +} +func (m *DeleteSyncLimitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteSyncLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteSyncLimitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteSyncLimitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSyncLimitRequest.Merge(m, src) +} +func (m *DeleteSyncLimitRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteSyncLimitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSyncLimitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSyncLimitRequest proto.InternalMessageInfo + +func (m *DeleteSyncLimitRequest) GetType() SyncConfigType { + if m != nil { + return m.Type + } + return SyncConfigType_CONFIG_MAP +} + +func (m *DeleteSyncLimitRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *DeleteSyncLimitRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteSyncLimitRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +type DeleteSyncLimitResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSyncLimitResponse) Reset() { *m = DeleteSyncLimitResponse{} } +func (m *DeleteSyncLimitResponse) String() string { return proto.CompactTextString(m) } 
+func (*DeleteSyncLimitResponse) ProtoMessage() {} +func (*DeleteSyncLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_74ab334b2e266b46, []int{5} +} +func (m *DeleteSyncLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteSyncLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteSyncLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteSyncLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSyncLimitResponse.Merge(m, src) +} +func (m *DeleteSyncLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *DeleteSyncLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSyncLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSyncLimitResponse proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("sync.SyncConfigType", SyncConfigType_name, SyncConfigType_value) + proto.RegisterType((*CreateSyncLimitRequest)(nil), "sync.CreateSyncLimitRequest") + proto.RegisterType((*SyncLimitResponse)(nil), "sync.SyncLimitResponse") + proto.RegisterType((*GetSyncLimitRequest)(nil), "sync.GetSyncLimitRequest") + proto.RegisterType((*UpdateSyncLimitRequest)(nil), "sync.UpdateSyncLimitRequest") + proto.RegisterType((*DeleteSyncLimitRequest)(nil), "sync.DeleteSyncLimitRequest") + proto.RegisterType((*DeleteSyncLimitResponse)(nil), "sync.DeleteSyncLimitResponse") +} + +func init() { proto.RegisterFile("pkg/apiclient/sync/sync.proto", fileDescriptor_74ab334b2e266b46) } + +var fileDescriptor_74ab334b2e266b46 = []byte{ + // 483 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0x66, 0x9b, 0x14, 0xd1, 0xa1, 0x4a, 0xc2, 0x82, 0x5a, 0x37, 0x4a, 0xa2, 0xc8, 0x95, 0x50, + 0x88, 0x44, 0x2c, 
0x8a, 0xb8, 0x70, 0x4b, 0x53, 0xa8, 0x90, 0xf8, 0x53, 0x52, 0x2e, 0x5c, 0x90, + 0x6b, 0xa6, 0x66, 0x9b, 0x78, 0x77, 0xf1, 0x6e, 0x53, 0x99, 0xaa, 0x17, 0x90, 0x78, 0x01, 0x1e, + 0x80, 0x0b, 0x0f, 0xc3, 0x11, 0x09, 0x71, 0x47, 0x11, 0x0f, 0x82, 0x76, 0xad, 0x36, 0x4d, 0x6a, + 0xab, 0xc7, 0xf6, 0x62, 0x8d, 0x67, 0xc7, 0xf3, 0x7d, 0x9f, 0xe7, 0x9b, 0x85, 0xba, 0x1c, 0x86, + 0x9e, 0x2f, 0x59, 0x30, 0x62, 0xc8, 0xb5, 0xa7, 0x12, 0x1e, 0xd8, 0x47, 0x47, 0xc6, 0x42, 0x0b, + 0x5a, 0x34, 0x71, 0xb5, 0x16, 0x0a, 0x11, 0x8e, 0xd0, 0xd4, 0x79, 0x3e, 0xe7, 0x42, 0xfb, 0x9a, + 0x09, 0xae, 0xd2, 0x1a, 0xf7, 0x07, 0x81, 0x95, 0x5e, 0x8c, 0xbe, 0xc6, 0x41, 0xc2, 0x83, 0xe7, + 0x2c, 0x62, 0xba, 0x8f, 0x1f, 0x0f, 0x50, 0x69, 0xda, 0x82, 0xa2, 0x4e, 0x24, 0x3a, 0xa4, 0x49, + 0x5a, 0xa5, 0x8d, 0x3b, 0x1d, 0xdb, 0xd9, 0x54, 0xf5, 0x04, 0xdf, 0x63, 0xe1, 0x4e, 0x22, 0xb1, + 0x6f, 0x2b, 0x68, 0x0d, 0x96, 0xb8, 0x1f, 0xa1, 0x92, 0x7e, 0x80, 0xce, 0x42, 0x93, 0xb4, 0x96, + 0xfa, 0xd3, 0x04, 0xa5, 0x50, 0x34, 0x2f, 0x4e, 0xc1, 0x1e, 0xd8, 0x98, 0x56, 0xa0, 0x30, 0xc4, + 0xc4, 0x29, 0xda, 0x94, 0x09, 0x4d, 0x0f, 0xc5, 0x3e, 0xa1, 0x65, 0xe0, 0x2c, 0x36, 0x49, 0x6b, + 0xb1, 0x3f, 0x4d, 0xb8, 0xdf, 0x09, 0xdc, 0x3a, 0x43, 0x50, 0x49, 0xc1, 0x15, 0x5e, 0x29, 0x86, + 0x5f, 0x08, 0xdc, 0xde, 0x46, 0x7d, 0xb9, 0x7f, 0xd1, 0x8e, 0xf3, 0x8d, 0x7c, 0x7f, 0xd5, 0xc7, + 0xf9, 0x95, 0xc0, 0xca, 0x16, 0x8e, 0xf0, 0xb2, 0x69, 0xba, 0x6b, 0xb0, 0x7a, 0x8e, 0x47, 0x6a, + 0xae, 0x76, 0x07, 0x4a, 0xb3, 0xb0, 0xb4, 0x04, 0xd0, 0x7b, 0xf5, 0xf2, 0xe9, 0xb3, 0xed, 0x77, + 0x2f, 0xba, 0xaf, 0x2b, 0xd7, 0xe8, 0x32, 0xdc, 0xd8, 0xea, 0xee, 0x74, 0x37, 0xbb, 0x83, 0x27, + 0x15, 0xb2, 0xf1, 0xa7, 0x00, 0x37, 0xcd, 0x07, 0x03, 0x8c, 0xc7, 0x2c, 0x40, 0x1a, 0x41, 0x79, + 0x6e, 0xb1, 0x68, 0x2d, 0x55, 0x93, 0xbd, 0x6f, 0xd5, 0xd5, 0xa9, 0xd6, 0x19, 0x26, 0xee, 0xfa, + 0xe7, 0xdf, 0xff, 0xbe, 0x2d, 0xd4, 0x5d, 0xc7, 0xee, 0xf0, 0xf8, 0x41, 0xba, 0xe8, 0x47, 0xa7, + 0x5a, 0x8f, 0x1f, 0x93, 0x36, 0xdd, 0x87, 0xe5, 0xb3, 
0xf6, 0xa3, 0x6b, 0x69, 0xb7, 0x0c, 0x4b, + 0xe6, 0x03, 0xdd, 0xb5, 0x40, 0x4d, 0xda, 0xc8, 0x03, 0xf2, 0x8e, 0x86, 0x98, 0x1c, 0x53, 0x05, + 0xe5, 0x39, 0x93, 0x9d, 0x48, 0xcb, 0xf6, 0x5e, 0x3e, 0xe2, 0x3d, 0x8b, 0xb8, 0x5e, 0xbd, 0x00, + 0xd1, 0x08, 0x1c, 0x43, 0x79, 0x6e, 0x54, 0x27, 0xa0, 0xd9, 0x4e, 0xaa, 0xd6, 0x73, 0x4e, 0x67, + 0xc5, 0xb6, 0x2f, 0x80, 0xde, 0xec, 0xfd, 0x9c, 0x34, 0xc8, 0xaf, 0x49, 0x83, 0xfc, 0x9d, 0x34, + 0xc8, 0xdb, 0x47, 0x21, 0xd3, 0x1f, 0x0e, 0x76, 0x3b, 0x81, 0x88, 0x3c, 0x3f, 0x0e, 0x85, 0x8c, + 0xc5, 0xbe, 0x0d, 0xee, 0x1f, 0x8a, 0x78, 0xb8, 0x37, 0x12, 0x87, 0xca, 0x3b, 0x7f, 0x2b, 0xef, + 0x5e, 0xb7, 0xb7, 0xed, 0xc3, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xce, 0x5f, 0xda, 0xd4, 0xb2, + 0x05, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SyncServiceClient is the client API for SyncService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type SyncServiceClient interface { + CreateSyncLimit(ctx context.Context, in *CreateSyncLimitRequest, opts ...grpc.CallOption) (*SyncLimitResponse, error) + GetSyncLimit(ctx context.Context, in *GetSyncLimitRequest, opts ...grpc.CallOption) (*SyncLimitResponse, error) + UpdateSyncLimit(ctx context.Context, in *UpdateSyncLimitRequest, opts ...grpc.CallOption) (*SyncLimitResponse, error) + DeleteSyncLimit(ctx context.Context, in *DeleteSyncLimitRequest, opts ...grpc.CallOption) (*DeleteSyncLimitResponse, error) +} + +type syncServiceClient struct { + cc *grpc.ClientConn +} + +func NewSyncServiceClient(cc *grpc.ClientConn) SyncServiceClient { + return &syncServiceClient{cc} +} + +func (c *syncServiceClient) CreateSyncLimit(ctx context.Context, in *CreateSyncLimitRequest, opts ...grpc.CallOption) (*SyncLimitResponse, error) { + out := new(SyncLimitResponse) + err := c.cc.Invoke(ctx, "/sync.SyncService/CreateSyncLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *syncServiceClient) GetSyncLimit(ctx context.Context, in *GetSyncLimitRequest, opts ...grpc.CallOption) (*SyncLimitResponse, error) { + out := new(SyncLimitResponse) + err := c.cc.Invoke(ctx, "/sync.SyncService/GetSyncLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *syncServiceClient) UpdateSyncLimit(ctx context.Context, in *UpdateSyncLimitRequest, opts ...grpc.CallOption) (*SyncLimitResponse, error) { + out := new(SyncLimitResponse) + err := c.cc.Invoke(ctx, "/sync.SyncService/UpdateSyncLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *syncServiceClient) DeleteSyncLimit(ctx context.Context, in *DeleteSyncLimitRequest, opts ...grpc.CallOption) (*DeleteSyncLimitResponse, error) { + out := new(DeleteSyncLimitResponse) + err := c.cc.Invoke(ctx, "/sync.SyncService/DeleteSyncLimit", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// SyncServiceServer is the server API for SyncService service. +type SyncServiceServer interface { + CreateSyncLimit(context.Context, *CreateSyncLimitRequest) (*SyncLimitResponse, error) + GetSyncLimit(context.Context, *GetSyncLimitRequest) (*SyncLimitResponse, error) + UpdateSyncLimit(context.Context, *UpdateSyncLimitRequest) (*SyncLimitResponse, error) + DeleteSyncLimit(context.Context, *DeleteSyncLimitRequest) (*DeleteSyncLimitResponse, error) +} + +// UnimplementedSyncServiceServer can be embedded to have forward compatible implementations. +type UnimplementedSyncServiceServer struct { +} + +func (*UnimplementedSyncServiceServer) CreateSyncLimit(ctx context.Context, req *CreateSyncLimitRequest) (*SyncLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSyncLimit not implemented") +} +func (*UnimplementedSyncServiceServer) GetSyncLimit(ctx context.Context, req *GetSyncLimitRequest) (*SyncLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSyncLimit not implemented") +} +func (*UnimplementedSyncServiceServer) UpdateSyncLimit(ctx context.Context, req *UpdateSyncLimitRequest) (*SyncLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateSyncLimit not implemented") +} +func (*UnimplementedSyncServiceServer) DeleteSyncLimit(ctx context.Context, req *DeleteSyncLimitRequest) (*DeleteSyncLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSyncLimit not implemented") +} + +func RegisterSyncServiceServer(s *grpc.Server, srv SyncServiceServer) { + s.RegisterService(&_SyncService_serviceDesc, srv) +} + +func _SyncService_CreateSyncLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSyncLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor 
== nil { + return srv.(SyncServiceServer).CreateSyncLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/sync.SyncService/CreateSyncLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SyncServiceServer).CreateSyncLimit(ctx, req.(*CreateSyncLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SyncService_GetSyncLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSyncLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SyncServiceServer).GetSyncLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/sync.SyncService/GetSyncLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SyncServiceServer).GetSyncLimit(ctx, req.(*GetSyncLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SyncService_UpdateSyncLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSyncLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SyncServiceServer).UpdateSyncLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/sync.SyncService/UpdateSyncLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SyncServiceServer).UpdateSyncLimit(ctx, req.(*UpdateSyncLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SyncService_DeleteSyncLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSyncLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(SyncServiceServer).DeleteSyncLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/sync.SyncService/DeleteSyncLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SyncServiceServer).DeleteSyncLimit(ctx, req.(*DeleteSyncLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SyncService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "sync.SyncService", + HandlerType: (*SyncServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSyncLimit", + Handler: _SyncService_CreateSyncLimit_Handler, + }, + { + MethodName: "GetSyncLimit", + Handler: _SyncService_GetSyncLimit_Handler, + }, + { + MethodName: "UpdateSyncLimit", + Handler: _SyncService_UpdateSyncLimit_Handler, + }, + { + MethodName: "DeleteSyncLimit", + Handler: _SyncService_DeleteSyncLimit_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/apiclient/sync/sync.proto", +} + +func (m *CreateSyncLimitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateSyncLimitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateSyncLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SizeLimit != 0 { + i = encodeVarintSync(dAtA, i, uint64(m.SizeLimit)) + i-- + dAtA[i] = 0x28 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintSync(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x22 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintSync(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 
0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintSync(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintSync(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SyncLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SyncLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SyncLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SizeLimit != 0 { + i = encodeVarintSync(dAtA, i, uint64(m.SizeLimit)) + i-- + dAtA[i] = 0x28 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintSync(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x22 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintSync(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintSync(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintSync(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetSyncLimitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSyncLimitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSyncLimitRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintSync(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x22 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintSync(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintSync(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintSync(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UpdateSyncLimitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateSyncLimitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateSyncLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SizeLimit != 0 { + i = encodeVarintSync(dAtA, i, uint64(m.SizeLimit)) + i-- + dAtA[i] = 0x28 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintSync(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x22 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintSync(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintSync(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintSync(dAtA, 
i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DeleteSyncLimitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteSyncLimitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteSyncLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintSync(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x22 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintSync(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintSync(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintSync(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DeleteSyncLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteSyncLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteSyncLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintSync(dAtA []byte, offset int, v uint64) int { + offset -= sovSync(v) 
+ base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CreateSyncLimitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovSync(uint64(m.Type)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + if m.SizeLimit != 0 { + n += 1 + sovSync(uint64(m.SizeLimit)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SyncLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovSync(uint64(m.Type)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + if m.SizeLimit != 0 { + n += 1 + sovSync(uint64(m.SizeLimit)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetSyncLimitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovSync(uint64(m.Type)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateSyncLimitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovSync(uint64(m.Type)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + 
sovSync(uint64(l)) + } + if m.SizeLimit != 0 { + n += 1 + sovSync(uint64(m.SizeLimit)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteSyncLimitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovSync(uint64(m.Type)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovSync(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteSyncLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSync(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSync(x uint64) (n int) { + return sovSync(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CreateSyncLimitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateSyncLimitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateSyncLimitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type 
|= SyncConfigType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + } + m.SizeLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SizeLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SyncLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SyncLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SyncLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SyncConfigType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + } + m.SizeLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SizeLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSyncLimitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSyncLimitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSyncLimitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SyncConfigType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthSync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateSyncLimitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateSyncLimitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateSyncLimitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SyncConfigType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + } + m.SizeLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SizeLimit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteSyncLimitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteSyncLimitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteSyncLimitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SyncConfigType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSync + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteSyncLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteSyncLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteSyncLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipSync(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSync(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSync + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSync + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSync + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSync = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSync = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSync = fmt.Errorf("proto: unexpected end of group") +) diff --git a/pkg/apiclient/sync/sync.pb.gw.go b/pkg/apiclient/sync/sync.pb.gw.go new file mode 100644 index 000000000000..f5efce5e87fd --- /dev/null +++ b/pkg/apiclient/sync/sync.pb.gw.go @@ -0,0 +1,626 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. 
+// source: pkg/apiclient/sync/sync.proto + +/* +Package sync is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package sync + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_SyncService_CreateSyncLimit_0(ctx context.Context, marshaler runtime.Marshaler, client SyncServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CreateSyncLimitRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.CreateSyncLimit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + 
+func local_request_SyncService_CreateSyncLimit_0(ctx context.Context, marshaler runtime.Marshaler, server SyncServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CreateSyncLimitRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.CreateSyncLimit(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SyncService_GetSyncLimit_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "key": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_SyncService_GetSyncLimit_0(ctx context.Context, marshaler runtime.Marshaler, client SyncServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetSyncLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + 
+ val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SyncService_GetSyncLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetSyncLimit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SyncService_GetSyncLimit_0(ctx context.Context, marshaler runtime.Marshaler, server SyncServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetSyncLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_SyncService_GetSyncLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetSyncLimit(ctx, &protoReq) + return msg, metadata, err + +} + +func request_SyncService_UpdateSyncLimit_0(ctx context.Context, marshaler runtime.Marshaler, client SyncServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateSyncLimitRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + msg, err := client.UpdateSyncLimit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SyncService_UpdateSyncLimit_0(ctx context.Context, marshaler runtime.Marshaler, server SyncServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateSyncLimitRequest + var metadata 
runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + msg, err := server.UpdateSyncLimit(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SyncService_DeleteSyncLimit_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "key": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_SyncService_DeleteSyncLimit_0(ctx context.Context, marshaler runtime.Marshaler, client SyncServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteSyncLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type 
mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SyncService_DeleteSyncLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DeleteSyncLimit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SyncService_DeleteSyncLimit_0(ctx context.Context, marshaler runtime.Marshaler, server SyncServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteSyncLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["key"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "key") + } + + protoReq.Key, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "key", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + 
} + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SyncService_DeleteSyncLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DeleteSyncLimit(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterSyncServiceHandlerServer registers the http handlers for service SyncService to "mux". +// UnaryRPC :call SyncServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSyncServiceHandlerFromEndpoint instead. +func RegisterSyncServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SyncServiceServer) error { + + mux.Handle("POST", pattern_SyncService_CreateSyncLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SyncService_CreateSyncLimit_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_CreateSyncLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SyncService_GetSyncLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SyncService_GetSyncLimit_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_GetSyncLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_SyncService_UpdateSyncLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SyncService_UpdateSyncLimit_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_UpdateSyncLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_SyncService_DeleteSyncLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SyncService_DeleteSyncLimit_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_DeleteSyncLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterSyncServiceHandlerFromEndpoint is same as RegisterSyncServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterSyncServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterSyncServiceHandler(ctx, mux, conn) +} + +// RegisterSyncServiceHandler registers the http handlers for service SyncService to "mux". 
+// The handlers forward requests to the grpc endpoint over "conn". +func RegisterSyncServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterSyncServiceHandlerClient(ctx, mux, NewSyncServiceClient(conn)) +} + +// RegisterSyncServiceHandlerClient registers the http handlers for service SyncService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SyncServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SyncServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "SyncServiceClient" to call the correct interceptors. +func RegisterSyncServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SyncServiceClient) error { + + mux.Handle("POST", pattern_SyncService_CreateSyncLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SyncService_CreateSyncLimit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_CreateSyncLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SyncService_GetSyncLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SyncService_GetSyncLimit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_GetSyncLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_SyncService_UpdateSyncLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SyncService_UpdateSyncLimit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_UpdateSyncLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_SyncService_DeleteSyncLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SyncService_DeleteSyncLimit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_SyncService_DeleteSyncLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_SyncService_CreateSyncLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "sync", "namespace"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SyncService_GetSyncLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "sync", "namespace", "key"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SyncService_UpdateSyncLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "sync", "namespace", "key"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_SyncService_DeleteSyncLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "sync", "namespace", "key"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_SyncService_CreateSyncLimit_0 = runtime.ForwardResponseMessage + + forward_SyncService_GetSyncLimit_0 = runtime.ForwardResponseMessage + + forward_SyncService_UpdateSyncLimit_0 = runtime.ForwardResponseMessage + 
+ forward_SyncService_DeleteSyncLimit_0 = runtime.ForwardResponseMessage +) diff --git a/pkg/apiclient/sync/sync.proto b/pkg/apiclient/sync/sync.proto new file mode 100644 index 000000000000..8649fcd1b610 --- /dev/null +++ b/pkg/apiclient/sync/sync.proto @@ -0,0 +1,73 @@ +syntax = "proto3"; +option go_package = "github.com/argoproj/argo-workflows/pkg/apiclient/sync"; + +import "google/api/annotations.proto"; + +package sync; + +enum SyncConfigType { + CONFIG_MAP = 0; + DATABASE = 1; +} + +message CreateSyncLimitRequest { + SyncConfigType type = 1; + string namespace = 2; + string name = 3; + string key = 4; + int32 sizeLimit = 5; +} + +message SyncLimitResponse { + SyncConfigType type = 1; + string namespace = 2; + string name = 3; + string key = 4; + int32 sizeLimit = 5; +} + +message GetSyncLimitRequest { + SyncConfigType type = 1; + string namespace = 2; + string name = 3; + string key = 4; +} + +message UpdateSyncLimitRequest { + SyncConfigType type = 1; + string namespace = 2; + string name = 3; + string key = 4; + int32 sizeLimit = 5; +} + +message DeleteSyncLimitRequest { + SyncConfigType type = 1; + string namespace = 2; + string name = 3; + string key = 4; +} + +message DeleteSyncLimitResponse { +} + +service SyncService { + rpc CreateSyncLimit(CreateSyncLimitRequest) returns (SyncLimitResponse) { + option (google.api.http) = { + post : "/api/v1/sync/{namespace}" + body : "*" + }; + } + rpc GetSyncLimit(GetSyncLimitRequest) returns (SyncLimitResponse) { + option (google.api.http).get = "/api/v1/sync/{namespace}/{key}"; + }; + rpc UpdateSyncLimit(UpdateSyncLimitRequest) returns (SyncLimitResponse) { + option (google.api.http) = { + put : "/api/v1/sync/{namespace}/{key}" + body : "*" + }; + } + rpc DeleteSyncLimit(DeleteSyncLimitRequest) returns (DeleteSyncLimitResponse) { + option (google.api.http).delete = "/api/v1/sync/{namespace}/{key}"; + }; +} diff --git a/sdks/java/client/docs/SyncCreateSyncLimitRequest.md 
b/sdks/java/client/docs/SyncCreateSyncLimitRequest.md new file mode 100644 index 000000000000..844d242dda90 --- /dev/null +++ b/sdks/java/client/docs/SyncCreateSyncLimitRequest.md @@ -0,0 +1,17 @@ + + +# SyncCreateSyncLimitRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**key** | **String** | | [optional] +**name** | **String** | | [optional] +**namespace** | **String** | | [optional] +**sizeLimit** | **Integer** | | [optional] +**type** | **SyncSyncConfigType** | | [optional] + + + diff --git a/sdks/java/client/docs/SyncServiceApi.md b/sdks/java/client/docs/SyncServiceApi.md new file mode 100644 index 000000000000..1617301dc385 --- /dev/null +++ b/sdks/java/client/docs/SyncServiceApi.md @@ -0,0 +1,302 @@ +# SyncServiceApi + +All URIs are relative to *http://localhost:2746* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**syncServiceCreateSyncLimit**](SyncServiceApi.md#syncServiceCreateSyncLimit) | **POST** /api/v1/sync/{namespace} | +[**syncServiceDeleteSyncLimit**](SyncServiceApi.md#syncServiceDeleteSyncLimit) | **DELETE** /api/v1/sync/{namespace}/{key} | +[**syncServiceGetSyncLimit**](SyncServiceApi.md#syncServiceGetSyncLimit) | **GET** /api/v1/sync/{namespace}/{key} | +[**syncServiceUpdateSyncLimit**](SyncServiceApi.md#syncServiceUpdateSyncLimit) | **PUT** /api/v1/sync/{namespace}/{key} | + + + +# **syncServiceCreateSyncLimit** +> SyncSyncLimitResponse syncServiceCreateSyncLimit(namespace, body) + + + +### Example +```java +// Import classes: +import io.argoproj.workflow.ApiClient; +import io.argoproj.workflow.ApiException; +import io.argoproj.workflow.Configuration; +import io.argoproj.workflow.auth.*; +import io.argoproj.workflow.models.*; +import io.argoproj.workflow.apis.SyncServiceApi; + +public class Example { + public static void main(String[] args) { + ApiClient defaultClient = Configuration.getDefaultApiClient(); + 
defaultClient.setBasePath("http://localhost:2746"); + + // Configure API key authorization: BearerToken + ApiKeyAuth BearerToken = (ApiKeyAuth) defaultClient.getAuthentication("BearerToken"); + BearerToken.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //BearerToken.setApiKeyPrefix("Token"); + + SyncServiceApi apiInstance = new SyncServiceApi(defaultClient); + String namespace = "namespace_example"; // String | + SyncCreateSyncLimitRequest body = new SyncCreateSyncLimitRequest(); // SyncCreateSyncLimitRequest | + try { + SyncSyncLimitResponse result = apiInstance.syncServiceCreateSyncLimit(namespace, body); + System.out.println(result); + } catch (ApiException e) { + System.err.println("Exception when calling SyncServiceApi#syncServiceCreateSyncLimit"); + System.err.println("Status code: " + e.getCode()); + System.err.println("Reason: " + e.getResponseBody()); + System.err.println("Response headers: " + e.getResponseHeaders()); + e.printStackTrace(); + } + } +} +``` + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **namespace** | **String**| | + **body** | [**SyncCreateSyncLimitRequest**](SyncCreateSyncLimitRequest.md)| | + +### Return type + +[**SyncSyncLimitResponse**](SyncSyncLimitResponse.md) + +### Authorization + +[BearerToken](../README.md#BearerToken) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | A successful response. | - | +**0** | An unexpected error response. 
| - | + + +# **syncServiceDeleteSyncLimit** +> Object syncServiceDeleteSyncLimit(namespace, key, type, name) + + + +### Example +```java +// Import classes: +import io.argoproj.workflow.ApiClient; +import io.argoproj.workflow.ApiException; +import io.argoproj.workflow.Configuration; +import io.argoproj.workflow.auth.*; +import io.argoproj.workflow.models.*; +import io.argoproj.workflow.apis.SyncServiceApi; + +public class Example { + public static void main(String[] args) { + ApiClient defaultClient = Configuration.getDefaultApiClient(); + defaultClient.setBasePath("http://localhost:2746"); + + // Configure API key authorization: BearerToken + ApiKeyAuth BearerToken = (ApiKeyAuth) defaultClient.getAuthentication("BearerToken"); + BearerToken.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //BearerToken.setApiKeyPrefix("Token"); + + SyncServiceApi apiInstance = new SyncServiceApi(defaultClient); + String namespace = "namespace_example"; // String | + String key = "key_example"; // String | + String type = "CONFIG_MAP"; // String | + String name = "name_example"; // String | + try { + Object result = apiInstance.syncServiceDeleteSyncLimit(namespace, key, type, name); + System.out.println(result); + } catch (ApiException e) { + System.err.println("Exception when calling SyncServiceApi#syncServiceDeleteSyncLimit"); + System.err.println("Status code: " + e.getCode()); + System.err.println("Reason: " + e.getResponseBody()); + System.err.println("Response headers: " + e.getResponseHeaders()); + e.printStackTrace(); + } + } +} +``` + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **namespace** | **String**| | + **key** | **String**| | + **type** | **String**| | [optional] [default to CONFIG_MAP] [enum: CONFIG_MAP, DATABASE] + **name** | **String**| | [optional] + +### Return type + +**Object** + +### Authorization + 
+[BearerToken](../README.md#BearerToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | A successful response. | - | +**0** | An unexpected error response. | - | + + +# **syncServiceGetSyncLimit** +> SyncSyncLimitResponse syncServiceGetSyncLimit(namespace, key, type, name) + + + +### Example +```java +// Import classes: +import io.argoproj.workflow.ApiClient; +import io.argoproj.workflow.ApiException; +import io.argoproj.workflow.Configuration; +import io.argoproj.workflow.auth.*; +import io.argoproj.workflow.models.*; +import io.argoproj.workflow.apis.SyncServiceApi; + +public class Example { + public static void main(String[] args) { + ApiClient defaultClient = Configuration.getDefaultApiClient(); + defaultClient.setBasePath("http://localhost:2746"); + + // Configure API key authorization: BearerToken + ApiKeyAuth BearerToken = (ApiKeyAuth) defaultClient.getAuthentication("BearerToken"); + BearerToken.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. 
"Token" (defaults to null) + //BearerToken.setApiKeyPrefix("Token"); + + SyncServiceApi apiInstance = new SyncServiceApi(defaultClient); + String namespace = "namespace_example"; // String | + String key = "key_example"; // String | + String type = "CONFIG_MAP"; // String | + String name = "name_example"; // String | + try { + SyncSyncLimitResponse result = apiInstance.syncServiceGetSyncLimit(namespace, key, type, name); + System.out.println(result); + } catch (ApiException e) { + System.err.println("Exception when calling SyncServiceApi#syncServiceGetSyncLimit"); + System.err.println("Status code: " + e.getCode()); + System.err.println("Reason: " + e.getResponseBody()); + System.err.println("Response headers: " + e.getResponseHeaders()); + e.printStackTrace(); + } + } +} +``` + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **namespace** | **String**| | + **key** | **String**| | + **type** | **String**| | [optional] [default to CONFIG_MAP] [enum: CONFIG_MAP, DATABASE] + **name** | **String**| | [optional] + +### Return type + +[**SyncSyncLimitResponse**](SyncSyncLimitResponse.md) + +### Authorization + +[BearerToken](../README.md#BearerToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | A successful response. | - | +**0** | An unexpected error response. 
| - | + + +# **syncServiceUpdateSyncLimit** +> SyncSyncLimitResponse syncServiceUpdateSyncLimit(namespace, key, body) + + + +### Example +```java +// Import classes: +import io.argoproj.workflow.ApiClient; +import io.argoproj.workflow.ApiException; +import io.argoproj.workflow.Configuration; +import io.argoproj.workflow.auth.*; +import io.argoproj.workflow.models.*; +import io.argoproj.workflow.apis.SyncServiceApi; + +public class Example { + public static void main(String[] args) { + ApiClient defaultClient = Configuration.getDefaultApiClient(); + defaultClient.setBasePath("http://localhost:2746"); + + // Configure API key authorization: BearerToken + ApiKeyAuth BearerToken = (ApiKeyAuth) defaultClient.getAuthentication("BearerToken"); + BearerToken.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //BearerToken.setApiKeyPrefix("Token"); + + SyncServiceApi apiInstance = new SyncServiceApi(defaultClient); + String namespace = "namespace_example"; // String | + String key = "key_example"; // String | + SyncUpdateSyncLimitRequest body = new SyncUpdateSyncLimitRequest(); // SyncUpdateSyncLimitRequest | + try { + SyncSyncLimitResponse result = apiInstance.syncServiceUpdateSyncLimit(namespace, key, body); + System.out.println(result); + } catch (ApiException e) { + System.err.println("Exception when calling SyncServiceApi#syncServiceUpdateSyncLimit"); + System.err.println("Status code: " + e.getCode()); + System.err.println("Reason: " + e.getResponseBody()); + System.err.println("Response headers: " + e.getResponseHeaders()); + e.printStackTrace(); + } + } +} +``` + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **namespace** | **String**| | + **key** | **String**| | + **body** | [**SyncUpdateSyncLimitRequest**](SyncUpdateSyncLimitRequest.md)| | + +### Return type + +[**SyncSyncLimitResponse**](SyncSyncLimitResponse.md) + 
+### Authorization + +[BearerToken](../README.md#BearerToken) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | A successful response. | - | +**0** | An unexpected error response. | - | + diff --git a/sdks/java/client/docs/SyncSyncConfigType.md b/sdks/java/client/docs/SyncSyncConfigType.md new file mode 100644 index 000000000000..bf7836cac874 --- /dev/null +++ b/sdks/java/client/docs/SyncSyncConfigType.md @@ -0,0 +1,13 @@ + + +# SyncSyncConfigType + +## Enum + + +* `CONFIG_MAP` (value: `"CONFIG_MAP"`) + +* `DATABASE` (value: `"DATABASE"`) + + + diff --git a/sdks/java/client/docs/SyncSyncLimitResponse.md b/sdks/java/client/docs/SyncSyncLimitResponse.md new file mode 100644 index 000000000000..1a2eb35b407b --- /dev/null +++ b/sdks/java/client/docs/SyncSyncLimitResponse.md @@ -0,0 +1,17 @@ + + +# SyncSyncLimitResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**key** | **String** | | [optional] +**name** | **String** | | [optional] +**namespace** | **String** | | [optional] +**sizeLimit** | **Integer** | | [optional] +**type** | **SyncSyncConfigType** | | [optional] + + + diff --git a/sdks/java/client/docs/SyncUpdateSyncLimitRequest.md b/sdks/java/client/docs/SyncUpdateSyncLimitRequest.md new file mode 100644 index 000000000000..1342aa2ba186 --- /dev/null +++ b/sdks/java/client/docs/SyncUpdateSyncLimitRequest.md @@ -0,0 +1,17 @@ + + +# SyncUpdateSyncLimitRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**key** | **String** | | [optional] +**name** | **String** | | [optional] +**namespace** | **String** | | [optional] +**sizeLimit** | **Integer** | | [optional] +**type** | **SyncSyncConfigType** | | [optional] + + + diff --git 
a/sdks/python/client/argo_workflows/api/sync_service_api.py b/sdks/python/client/argo_workflows/api/sync_service_api.py new file mode 100644 index 000000000000..26fd9db14223 --- /dev/null +++ b/sdks/python/client/argo_workflows/api/sync_service_api.py @@ -0,0 +1,641 @@ +""" + Argo Workflows API + + Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argo-workflows.readthedocs.io/en/latest/ # noqa: E501 + + The version of the OpenAPI document: VERSION + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from argo_workflows.api_client import ApiClient, Endpoint as _Endpoint +from argo_workflows.model_utils import ( # noqa: F401 + check_allowed_values, + check_validations, + date, + datetime, + file_type, + none_type, + validate_and_convert_types +) +from argo_workflows.model.grpc_gateway_runtime_error import GrpcGatewayRuntimeError +from argo_workflows.model.sync_create_sync_limit_request import SyncCreateSyncLimitRequest +from argo_workflows.model.sync_sync_limit_response import SyncSyncLimitResponse +from argo_workflows.model.sync_update_sync_limit_request import SyncUpdateSyncLimitRequest + + +class SyncServiceApi(object): + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None): + if api_client is None: + api_client = ApiClient() + self.api_client = api_client + self.create_sync_limit_endpoint = _Endpoint( + settings={ + 'response_type': (SyncSyncLimitResponse,), + 'auth': [ + 'BearerToken' + ], + 'endpoint_path': '/api/v1/sync/{namespace}', + 'operation_id': 'create_sync_limit', + 'http_method': 'POST', + 'servers': None, + }, + params_map={ + 'all': [ + 'namespace', + 'body', + ], + 'required': [ + 'namespace', + 'body', + ], + 'nullable': [ + ], + 'enum': [ + ], + 'validation': [ + ] + }, + root_map={ + 'validations': { + }, + 'allowed_values': { + }, + 'openapi_types': { + 'namespace': + (str,), + 'body': + (SyncCreateSyncLimitRequest,), + }, + 'attribute_map': { + 'namespace': 'namespace', + }, + 'location_map': { + 'namespace': 'path', + 'body': 'body', + }, + 'collection_format_map': { + } + }, + headers_map={ + 'accept': [ + 'application/json' + ], + 'content_type': [ + 'application/json' + ] + }, + api_client=api_client + ) + self.delete_sync_limit_endpoint = _Endpoint( + settings={ + 'response_type': (bool, date, datetime, dict, float, int, list, str, none_type,), + 'auth': [ + 'BearerToken' + ], + 'endpoint_path': '/api/v1/sync/{namespace}/{key}', + 'operation_id': 'delete_sync_limit', + 'http_method': 'DELETE', + 'servers': None, + }, + params_map={ + 'all': [ + 'namespace', + 'key', + 'type', + 'name', + ], + 'required': [ + 'namespace', + 'key', + ], + 'nullable': [ + ], + 'enum': [ + 'type', + ], + 'validation': [ + ] + }, + root_map={ + 'validations': { + }, + 'allowed_values': { + ('type',): { + + "CONFIG_MAP": "CONFIG_MAP", + "DATABASE": "DATABASE" + }, + }, + 'openapi_types': { + 'namespace': + (str,), + 'key': + (str,), + 'type': + (str,), + 'name': + (str,), + }, + 'attribute_map': { + 'namespace': 'namespace', + 'key': 'key', + 'type': 'type', + 'name': 'name', + }, + 'location_map': { + 'namespace': 'path', + 'key': 'path', + 'type': 'query', + 'name': 'query', + }, + 
'collection_format_map': { + } + }, + headers_map={ + 'accept': [ + 'application/json' + ], + 'content_type': [], + }, + api_client=api_client + ) + self.get_sync_limit_endpoint = _Endpoint( + settings={ + 'response_type': (SyncSyncLimitResponse,), + 'auth': [ + 'BearerToken' + ], + 'endpoint_path': '/api/v1/sync/{namespace}/{key}', + 'operation_id': 'get_sync_limit', + 'http_method': 'GET', + 'servers': None, + }, + params_map={ + 'all': [ + 'namespace', + 'key', + 'type', + 'name', + ], + 'required': [ + 'namespace', + 'key', + ], + 'nullable': [ + ], + 'enum': [ + 'type', + ], + 'validation': [ + ] + }, + root_map={ + 'validations': { + }, + 'allowed_values': { + ('type',): { + + "CONFIG_MAP": "CONFIG_MAP", + "DATABASE": "DATABASE" + }, + }, + 'openapi_types': { + 'namespace': + (str,), + 'key': + (str,), + 'type': + (str,), + 'name': + (str,), + }, + 'attribute_map': { + 'namespace': 'namespace', + 'key': 'key', + 'type': 'type', + 'name': 'name', + }, + 'location_map': { + 'namespace': 'path', + 'key': 'path', + 'type': 'query', + 'name': 'query', + }, + 'collection_format_map': { + } + }, + headers_map={ + 'accept': [ + 'application/json' + ], + 'content_type': [], + }, + api_client=api_client + ) + self.update_sync_limit_endpoint = _Endpoint( + settings={ + 'response_type': (SyncSyncLimitResponse,), + 'auth': [ + 'BearerToken' + ], + 'endpoint_path': '/api/v1/sync/{namespace}/{key}', + 'operation_id': 'update_sync_limit', + 'http_method': 'PUT', + 'servers': None, + }, + params_map={ + 'all': [ + 'namespace', + 'key', + 'body', + ], + 'required': [ + 'namespace', + 'key', + 'body', + ], + 'nullable': [ + ], + 'enum': [ + ], + 'validation': [ + ] + }, + root_map={ + 'validations': { + }, + 'allowed_values': { + }, + 'openapi_types': { + 'namespace': + (str,), + 'key': + (str,), + 'body': + (SyncUpdateSyncLimitRequest,), + }, + 'attribute_map': { + 'namespace': 'namespace', + 'key': 'key', + }, + 'location_map': { + 'namespace': 'path', + 'key': 'path', + 
'body': 'body', + }, + 'collection_format_map': { + } + }, + headers_map={ + 'accept': [ + 'application/json' + ], + 'content_type': [ + 'application/json' + ] + }, + api_client=api_client + ) + + def create_sync_limit( + self, + namespace, + body, + **kwargs + ): + """create_sync_limit # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.create_sync_limit(namespace, body, async_req=True) + >>> result = thread.get() + + Args: + namespace (str): + body (SyncCreateSyncLimitRequest): + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + SyncSyncLimitResponse + If the method is called asynchronously, returns the request + thread. 
+ """ + kwargs['async_req'] = kwargs.get( + 'async_req', False + ) + kwargs['_return_http_data_only'] = kwargs.get( + '_return_http_data_only', True + ) + kwargs['_preload_content'] = kwargs.get( + '_preload_content', True + ) + kwargs['_request_timeout'] = kwargs.get( + '_request_timeout', None + ) + kwargs['_check_input_type'] = kwargs.get( + '_check_input_type', True + ) + kwargs['_check_return_type'] = kwargs.get( + '_check_return_type', True + ) + kwargs['_spec_property_naming'] = kwargs.get( + '_spec_property_naming', False + ) + kwargs['_content_type'] = kwargs.get( + '_content_type') + kwargs['_host_index'] = kwargs.get('_host_index') + kwargs['namespace'] = \ + namespace + kwargs['body'] = \ + body + return self.create_sync_limit_endpoint.call_with_http_info(**kwargs) + + def delete_sync_limit( + self, + namespace, + key, + **kwargs + ): + """delete_sync_limit # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.delete_sync_limit(namespace, key, async_req=True) + >>> result = thread.get() + + Args: + namespace (str): + key (str): + + Keyword Args: + type (str): [optional] if omitted the server will use the default value of "CONFIG_MAP" + name (str): [optional] + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. 
+ Default is True. + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + bool, date, datetime, dict, float, int, list, str, none_type + If the method is called asynchronously, returns the request + thread. + """ + kwargs['async_req'] = kwargs.get( + 'async_req', False + ) + kwargs['_return_http_data_only'] = kwargs.get( + '_return_http_data_only', True + ) + kwargs['_preload_content'] = kwargs.get( + '_preload_content', True + ) + kwargs['_request_timeout'] = kwargs.get( + '_request_timeout', None + ) + kwargs['_check_input_type'] = kwargs.get( + '_check_input_type', True + ) + kwargs['_check_return_type'] = kwargs.get( + '_check_return_type', True + ) + kwargs['_spec_property_naming'] = kwargs.get( + '_spec_property_naming', False + ) + kwargs['_content_type'] = kwargs.get( + '_content_type') + kwargs['_host_index'] = kwargs.get('_host_index') + kwargs['namespace'] = \ + namespace + kwargs['key'] = \ + key + return self.delete_sync_limit_endpoint.call_with_http_info(**kwargs) + + def get_sync_limit( + self, + namespace, + key, + **kwargs + ): + """get_sync_limit # noqa: E501 + + This method makes a synchronous HTTP request by default. 
To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.get_sync_limit(namespace, key, async_req=True) + >>> result = thread.get() + + Args: + namespace (str): + key (str): + + Keyword Args: + type (str): [optional] if omitted the server will use the default value of "CONFIG_MAP" + name (str): [optional] + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + SyncSyncLimitResponse + If the method is called asynchronously, returns the request + thread. 
+ """ + kwargs['async_req'] = kwargs.get( + 'async_req', False + ) + kwargs['_return_http_data_only'] = kwargs.get( + '_return_http_data_only', True + ) + kwargs['_preload_content'] = kwargs.get( + '_preload_content', True + ) + kwargs['_request_timeout'] = kwargs.get( + '_request_timeout', None + ) + kwargs['_check_input_type'] = kwargs.get( + '_check_input_type', True + ) + kwargs['_check_return_type'] = kwargs.get( + '_check_return_type', True + ) + kwargs['_spec_property_naming'] = kwargs.get( + '_spec_property_naming', False + ) + kwargs['_content_type'] = kwargs.get( + '_content_type') + kwargs['_host_index'] = kwargs.get('_host_index') + kwargs['namespace'] = \ + namespace + kwargs['key'] = \ + key + return self.get_sync_limit_endpoint.call_with_http_info(**kwargs) + + def update_sync_limit( + self, + namespace, + key, + body, + **kwargs + ): + """update_sync_limit # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.update_sync_limit(namespace, key, body, async_req=True) + >>> result = thread.get() + + Args: + namespace (str): + key (str): + body (SyncUpdateSyncLimitRequest): + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. 
+ _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + SyncSyncLimitResponse + If the method is called asynchronously, returns the request + thread. + """ + kwargs['async_req'] = kwargs.get( + 'async_req', False + ) + kwargs['_return_http_data_only'] = kwargs.get( + '_return_http_data_only', True + ) + kwargs['_preload_content'] = kwargs.get( + '_preload_content', True + ) + kwargs['_request_timeout'] = kwargs.get( + '_request_timeout', None + ) + kwargs['_check_input_type'] = kwargs.get( + '_check_input_type', True + ) + kwargs['_check_return_type'] = kwargs.get( + '_check_return_type', True + ) + kwargs['_spec_property_naming'] = kwargs.get( + '_spec_property_naming', False + ) + kwargs['_content_type'] = kwargs.get( + '_content_type') + kwargs['_host_index'] = kwargs.get('_host_index') + kwargs['namespace'] = \ + namespace + kwargs['key'] = \ + key + kwargs['body'] = \ + body + return self.update_sync_limit_endpoint.call_with_http_info(**kwargs) + diff --git a/sdks/python/client/argo_workflows/apis/__init__.py b/sdks/python/client/argo_workflows/apis/__init__.py index bbd2e429fa68..983e2e696b43 100644 --- a/sdks/python/client/argo_workflows/apis/__init__.py +++ b/sdks/python/client/argo_workflows/apis/__init__.py @@ -22,5 +22,6 @@ from argo_workflows.api.event_source_service_api import EventSourceServiceApi from argo_workflows.api.info_service_api import InfoServiceApi from argo_workflows.api.sensor_service_api import SensorServiceApi +from 
argo_workflows.api.sync_service_api import SyncServiceApi from argo_workflows.api.workflow_service_api import WorkflowServiceApi from argo_workflows.api.workflow_template_service_api import WorkflowTemplateServiceApi diff --git a/sdks/python/client/argo_workflows/model/sync_create_sync_limit_request.py b/sdks/python/client/argo_workflows/model/sync_create_sync_limit_request.py new file mode 100644 index 000000000000..954bec8ff7cf --- /dev/null +++ b/sdks/python/client/argo_workflows/model/sync_create_sync_limit_request.py @@ -0,0 +1,277 @@ +""" + Argo Workflows API + + Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argo-workflows.readthedocs.io/en/latest/ # noqa: E501 + + The version of the OpenAPI document: VERSION + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from argo_workflows.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from argo_workflows.exceptions import ApiAttributeError + + +def lazy_import(): + from argo_workflows.model.sync_sync_config_type import SyncSyncConfigType + globals()['SyncSyncConfigType'] = SyncSyncConfigType + + +class SyncCreateSyncLimitRequest(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. 
+ discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + } + + validations = { + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. 
+ """ + lazy_import() + return { + 'key': (str,), # noqa: E501 + 'name': (str,), # noqa: E501 + 'namespace': (str,), # noqa: E501 + 'size_limit': (int,), # noqa: E501 + 'type': (SyncSyncConfigType,), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + + attribute_map = { + 'key': 'key', # noqa: E501 + 'name': 'name', # noqa: E501 + 'namespace': 'namespace', # noqa: E501 + 'size_limit': 'sizeLimit', # noqa: E501 + 'type': 'type', # noqa: E501 + } + + read_only_vars = { + } + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + """SyncCreateSyncLimitRequest - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + key (str): [optional] # noqa: E501 + name (str): [optional] # noqa: E501 + namespace (str): [optional] # noqa: E501 + size_limit (int): [optional] # noqa: E501 + type (SyncSyncConfigType): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. 
+ continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): # noqa: E501 + """SyncCreateSyncLimitRequest - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + key (str): [optional] # noqa: E501 + name (str): [optional] # noqa: E501 + namespace (str): [optional] # noqa: E501 + size_limit (int): [optional] # noqa: E501 + type (SyncSyncConfigType): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError(f"`{var_name}` is a read-only attribute. 
Use `from_openapi_data` to instantiate " + f"class with read only attributes.") diff --git a/sdks/python/client/argo_workflows/model/sync_sync_config_type.py b/sdks/python/client/argo_workflows/model/sync_sync_config_type.py new file mode 100644 index 000000000000..1075db334433 --- /dev/null +++ b/sdks/python/client/argo_workflows/model/sync_sync_config_type.py @@ -0,0 +1,274 @@ +""" + Argo Workflows API + + Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argo-workflows.readthedocs.io/en/latest/ # noqa: E501 + + The version of the OpenAPI document: VERSION + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from argo_workflows.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from argo_workflows.exceptions import ApiAttributeError + + + +class SyncSyncConfigType(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. 
+ """ + + allowed_values = { + ('value',): { + 'CONFIG_MAP': "CONFIG_MAP", + 'DATABASE': "DATABASE", + }, + } + + validations = { + } + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + 'value': (str,), + } + + @cached_property + def discriminator(): + return None + + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """SyncSyncConfigType - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): if omitted defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ] # noqa: E501 + + Keyword Args: + value (str): if omitted defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. 
+ _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop('_path_to_item', ()) + + if 'value' in kwargs: + value = kwargs.pop('value') + elif args: + args = list(args) + value = args.pop(0) + else: + value = "CONFIG_MAP" + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." 
% ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """SyncSyncConfigType - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): if omitted defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ] # noqa: E501 + + Keyword Args: + value (str): if omitted defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop('_path_to_item', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if 'value' in kwargs: + value = kwargs.pop('value') + elif args: + args = list(args) + value = args.pop(0) + else: + value = "CONFIG_MAP" + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/sdks/python/client/argo_workflows/model/sync_sync_limit_response.py b/sdks/python/client/argo_workflows/model/sync_sync_limit_response.py new file mode 100644 index 000000000000..14f44df836f1 --- /dev/null +++ b/sdks/python/client/argo_workflows/model/sync_sync_limit_response.py @@ -0,0 +1,277 @@ +""" + Argo Workflows API + + Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. 
For more information, please see https://argo-workflows.readthedocs.io/en/latest/ # noqa: E501 + + The version of the OpenAPI document: VERSION + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from argo_workflows.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from argo_workflows.exceptions import ApiAttributeError + + +def lazy_import(): + from argo_workflows.model.sync_sync_config_type import SyncSyncConfigType + globals()['SyncSyncConfigType'] = SyncSyncConfigType + + +class SyncSyncLimitResponse(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. 
+ """ + + allowed_values = { + } + + validations = { + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + lazy_import() + return { + 'key': (str,), # noqa: E501 + 'name': (str,), # noqa: E501 + 'namespace': (str,), # noqa: E501 + 'size_limit': (int,), # noqa: E501 + 'type': (SyncSyncConfigType,), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + + attribute_map = { + 'key': 'key', # noqa: E501 + 'name': 'name', # noqa: E501 + 'namespace': 'namespace', # noqa: E501 + 'size_limit': 'sizeLimit', # noqa: E501 + 'type': 'type', # noqa: E501 + } + + read_only_vars = { + } + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + """SyncSyncLimitResponse - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. 
snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + key (str): [optional] # noqa: E501 + name (str): [optional] # noqa: E501 + namespace (str): [optional] # noqa: E501 + size_limit (int): [optional] # noqa: E501 + type (SyncSyncConfigType): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): # noqa: E501 + """SyncSyncLimitResponse - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. 
+ When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + key (str): [optional] # noqa: E501 + name (str): [optional] # noqa: E501 + namespace (str): [optional] # noqa: E501 + size_limit (int): [optional] # noqa: E501 + type (SyncSyncConfigType): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError(f"`{var_name}` is a read-only attribute. 
Use `from_openapi_data` to instantiate " + f"class with read only attributes.") diff --git a/sdks/python/client/argo_workflows/model/sync_update_sync_limit_request.py b/sdks/python/client/argo_workflows/model/sync_update_sync_limit_request.py new file mode 100644 index 000000000000..68f4a91154bb --- /dev/null +++ b/sdks/python/client/argo_workflows/model/sync_update_sync_limit_request.py @@ -0,0 +1,277 @@ +""" + Argo Workflows API + + Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argo-workflows.readthedocs.io/en/latest/ # noqa: E501 + + The version of the OpenAPI document: VERSION + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from argo_workflows.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from argo_workflows.exceptions import ApiAttributeError + + +def lazy_import(): + from argo_workflows.model.sync_sync_config_type import SyncSyncConfigType + globals()['SyncSyncConfigType'] = SyncSyncConfigType + + +class SyncUpdateSyncLimitRequest(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. 
+ validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + } + + validations = { + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + lazy_import() + return { + 'key': (str,), # noqa: E501 + 'name': (str,), # noqa: E501 + 'namespace': (str,), # noqa: E501 + 'size_limit': (int,), # noqa: E501 + 'type': (SyncSyncConfigType,), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + + attribute_map = { + 'key': 'key', # noqa: E501 + 'name': 'name', # noqa: E501 + 'namespace': 'namespace', # noqa: E501 + 'size_limit': 'sizeLimit', # noqa: E501 + 'type': 'type', # noqa: E501 + } + + read_only_vars = { + } + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + """SyncUpdateSyncLimitRequest - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. 
+ Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + key (str): [optional] # noqa: E501 + name (str): [optional] # noqa: E501 + namespace (str): [optional] # noqa: E501 + size_limit (int): [optional] # noqa: E501 + type (SyncSyncConfigType): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): # noqa: E501 + """SyncUpdateSyncLimitRequest - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. 
+ When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + key (str): [optional] # noqa: E501 + name (str): [optional] # noqa: E501 + namespace (str): [optional] # noqa: E501 + size_limit (int): [optional] # noqa: E501 + type (SyncSyncConfigType): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError(f"`{var_name}` is a read-only attribute. 
Use `from_openapi_data` to instantiate " + f"class with read only attributes.") diff --git a/sdks/python/client/argo_workflows/models/__init__.py b/sdks/python/client/argo_workflows/models/__init__.py index c0246a9a9abe..cec1ab55d4ee 100644 --- a/sdks/python/client/argo_workflows/models/__init__.py +++ b/sdks/python/client/argo_workflows/models/__init__.py @@ -391,6 +391,10 @@ from argo_workflows.model.stream_result_of_io_argoproj_workflow_v1alpha1_workflow_watch_event import StreamResultOfIoArgoprojWorkflowV1alpha1WorkflowWatchEvent from argo_workflows.model.stream_result_of_sensor_log_entry import StreamResultOfSensorLogEntry from argo_workflows.model.stream_result_of_sensor_sensor_watch_event import StreamResultOfSensorSensorWatchEvent +from argo_workflows.model.sync_create_sync_limit_request import SyncCreateSyncLimitRequest +from argo_workflows.model.sync_sync_config_type import SyncSyncConfigType +from argo_workflows.model.sync_sync_limit_response import SyncSyncLimitResponse +from argo_workflows.model.sync_update_sync_limit_request import SyncUpdateSyncLimitRequest from argo_workflows.model.sysctl import Sysctl from argo_workflows.model.tcp_socket_action import TCPSocketAction from argo_workflows.model.toleration import Toleration diff --git a/sdks/python/client/docs/SyncCreateSyncLimitRequest.md b/sdks/python/client/docs/SyncCreateSyncLimitRequest.md new file mode 100644 index 000000000000..90e31d02cde2 --- /dev/null +++ b/sdks/python/client/docs/SyncCreateSyncLimitRequest.md @@ -0,0 +1,16 @@ +# SyncCreateSyncLimitRequest + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**key** | **str** | | [optional] +**name** | **str** | | [optional] +**namespace** | **str** | | [optional] +**size_limit** | **int** | | [optional] +**type** | [**SyncSyncConfigType**](SyncSyncConfigType.md) | | [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string 
name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/python/client/docs/SyncServiceApi.md b/sdks/python/client/docs/SyncServiceApi.md new file mode 100644 index 000000000000..98afa8ced0d2 --- /dev/null +++ b/sdks/python/client/docs/SyncServiceApi.md @@ -0,0 +1,367 @@ +# argo_workflows.SyncServiceApi + +All URIs are relative to *http://localhost:2746* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**create_sync_limit**](SyncServiceApi.md#create_sync_limit) | **POST** /api/v1/sync/{namespace} | +[**delete_sync_limit**](SyncServiceApi.md#delete_sync_limit) | **DELETE** /api/v1/sync/{namespace}/{key} | +[**get_sync_limit**](SyncServiceApi.md#get_sync_limit) | **GET** /api/v1/sync/{namespace}/{key} | +[**update_sync_limit**](SyncServiceApi.md#update_sync_limit) | **PUT** /api/v1/sync/{namespace}/{key} | + + +# **create_sync_limit** +> SyncSyncLimitResponse create_sync_limit(namespace, body) + + + +### Example + +* Api Key Authentication (BearerToken): + +```python +import time +import argo_workflows +from argo_workflows.api import sync_service_api +from argo_workflows.model.grpc_gateway_runtime_error import GrpcGatewayRuntimeError +from argo_workflows.model.sync_create_sync_limit_request import SyncCreateSyncLimitRequest +from argo_workflows.model.sync_sync_limit_response import SyncSyncLimitResponse +from pprint import pprint +# Defining the host is optional and defaults to http://localhost:2746 +# See configuration.py for a list of all supported configuration parameters. +configuration = argo_workflows.Configuration( + host = "http://localhost:2746" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. 
+# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: BearerToken +configuration.api_key['BearerToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['BearerToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with argo_workflows.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = sync_service_api.SyncServiceApi(api_client) + namespace = "namespace_example" # str | + body = SyncCreateSyncLimitRequest( + key="key_example", + name="name_example", + namespace="namespace_example", + size_limit=1, + type=SyncSyncConfigType("CONFIG_MAP"), + ) # SyncCreateSyncLimitRequest | + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.create_sync_limit(namespace, body) + pprint(api_response) + except argo_workflows.ApiException as e: + print("Exception when calling SyncServiceApi->create_sync_limit: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **namespace** | **str**| | + **body** | [**SyncCreateSyncLimitRequest**](SyncCreateSyncLimitRequest.md)| | + +### Return type + +[**SyncSyncLimitResponse**](SyncSyncLimitResponse.md) + +### Authorization + +[BearerToken](../README.md#BearerToken) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | A successful response. | - | +**0** | An unexpected error response. 
| - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **delete_sync_limit** +> bool, date, datetime, dict, float, int, list, str, none_type delete_sync_limit(namespace, key) + + + +### Example + +* Api Key Authentication (BearerToken): + +```python +import time +import argo_workflows +from argo_workflows.api import sync_service_api +from argo_workflows.model.grpc_gateway_runtime_error import GrpcGatewayRuntimeError +from pprint import pprint +# Defining the host is optional and defaults to http://localhost:2746 +# See configuration.py for a list of all supported configuration parameters. +configuration = argo_workflows.Configuration( + host = "http://localhost:2746" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: BearerToken +configuration.api_key['BearerToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['BearerToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with argo_workflows.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = sync_service_api.SyncServiceApi(api_client) + namespace = "namespace_example" # str | + key = "key_example" # str | + type = "CONFIG_MAP" # str | (optional) if omitted the server will use the default value of "CONFIG_MAP" + name = "name_example" # str | (optional) + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.delete_sync_limit(namespace, key) + pprint(api_response) + except argo_workflows.ApiException as e: + print("Exception when calling SyncServiceApi->delete_sync_limit: %s\n" % e) + + # example passing only required values which don't have defaults set + # and optional values + try: + api_response = api_instance.delete_sync_limit(namespace, key, type=type, name=name) + pprint(api_response) + except argo_workflows.ApiException as e: + print("Exception when calling SyncServiceApi->delete_sync_limit: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **namespace** | **str**| | + **key** | **str**| | + **type** | **str**| | [optional] if omitted the server will use the default value of "CONFIG_MAP" + **name** | **str**| | [optional] + +### Return type + +**bool, date, datetime, dict, float, int, list, str, none_type** + +### Authorization + +[BearerToken](../README.md#BearerToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | A successful response. | - | +**0** | An unexpected error response. 
| - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **get_sync_limit** +> SyncSyncLimitResponse get_sync_limit(namespace, key) + + + +### Example + +* Api Key Authentication (BearerToken): + +```python +import time +import argo_workflows +from argo_workflows.api import sync_service_api +from argo_workflows.model.grpc_gateway_runtime_error import GrpcGatewayRuntimeError +from argo_workflows.model.sync_sync_limit_response import SyncSyncLimitResponse +from pprint import pprint +# Defining the host is optional and defaults to http://localhost:2746 +# See configuration.py for a list of all supported configuration parameters. +configuration = argo_workflows.Configuration( + host = "http://localhost:2746" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: BearerToken +configuration.api_key['BearerToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['BearerToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with argo_workflows.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = sync_service_api.SyncServiceApi(api_client) + namespace = "namespace_example" # str | + key = "key_example" # str | + type = "CONFIG_MAP" # str | (optional) if omitted the server will use the default value of "CONFIG_MAP" + name = "name_example" # str | (optional) + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.get_sync_limit(namespace, key) + pprint(api_response) + except argo_workflows.ApiException as e: + print("Exception when calling SyncServiceApi->get_sync_limit: %s\n" % e) + + # example passing only required values which don't have defaults set + # and optional values + try: + api_response = api_instance.get_sync_limit(namespace, key, type=type, name=name) + pprint(api_response) + except argo_workflows.ApiException as e: + print("Exception when calling SyncServiceApi->get_sync_limit: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **namespace** | **str**| | + **key** | **str**| | + **type** | **str**| | [optional] if omitted the server will use the default value of "CONFIG_MAP" + **name** | **str**| | [optional] + +### Return type + +[**SyncSyncLimitResponse**](SyncSyncLimitResponse.md) + +### Authorization + +[BearerToken](../README.md#BearerToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | A successful response. | - | +**0** | An unexpected error response. 
| - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **update_sync_limit** +> SyncSyncLimitResponse update_sync_limit(namespace, key, body) + + + +### Example + +* Api Key Authentication (BearerToken): + +```python +import time +import argo_workflows +from argo_workflows.api import sync_service_api +from argo_workflows.model.grpc_gateway_runtime_error import GrpcGatewayRuntimeError +from argo_workflows.model.sync_sync_limit_response import SyncSyncLimitResponse +from argo_workflows.model.sync_update_sync_limit_request import SyncUpdateSyncLimitRequest +from pprint import pprint +# Defining the host is optional and defaults to http://localhost:2746 +# See configuration.py for a list of all supported configuration parameters. +configuration = argo_workflows.Configuration( + host = "http://localhost:2746" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: BearerToken +configuration.api_key['BearerToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['BearerToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with argo_workflows.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = sync_service_api.SyncServiceApi(api_client) + namespace = "namespace_example" # str | + key = "key_example" # str | + body = SyncUpdateSyncLimitRequest( + key="key_example", + name="name_example", + namespace="namespace_example", + size_limit=1, + type=SyncSyncConfigType("CONFIG_MAP"), + ) # SyncUpdateSyncLimitRequest | + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.update_sync_limit(namespace, key, body) + pprint(api_response) + except argo_workflows.ApiException as e: + print("Exception when calling SyncServiceApi->update_sync_limit: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **namespace** | **str**| | + **key** | **str**| | + **body** | [**SyncUpdateSyncLimitRequest**](SyncUpdateSyncLimitRequest.md)| | + +### Return type + +[**SyncSyncLimitResponse**](SyncSyncLimitResponse.md) + +### Authorization + +[BearerToken](../README.md#BearerToken) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | A successful response. | - | +**0** | An unexpected error response. 
| - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/python/client/docs/SyncSyncConfigType.md b/sdks/python/client/docs/SyncSyncConfigType.md new file mode 100644 index 000000000000..30753e9982ad --- /dev/null +++ b/sdks/python/client/docs/SyncSyncConfigType.md @@ -0,0 +1,11 @@ +# SyncSyncConfigType + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | | defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/python/client/docs/SyncSyncLimitResponse.md b/sdks/python/client/docs/SyncSyncLimitResponse.md new file mode 100644 index 000000000000..a489ec922771 --- /dev/null +++ b/sdks/python/client/docs/SyncSyncLimitResponse.md @@ -0,0 +1,16 @@ +# SyncSyncLimitResponse + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**key** | **str** | | [optional] +**name** | **str** | | [optional] +**namespace** | **str** | | [optional] +**size_limit** | **int** | | [optional] +**type** | [**SyncSyncConfigType**](SyncSyncConfigType.md) | | [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/python/client/docs/SyncUpdateSyncLimitRequest.md b/sdks/python/client/docs/SyncUpdateSyncLimitRequest.md new file mode 100644 index 000000000000..484738c504bb --- /dev/null +++ 
b/sdks/python/client/docs/SyncUpdateSyncLimitRequest.md @@ -0,0 +1,16 @@ +# SyncUpdateSyncLimitRequest + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**key** | **str** | | [optional] +**name** | **str** | | [optional] +**namespace** | **str** | | [optional] +**size_limit** | **int** | | [optional] +**type** | [**SyncSyncConfigType**](SyncSyncConfigType.md) | | [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/server/apiserver/argoserver.go b/server/apiserver/argoserver.go index bcd25186a8c5..71c085c91847 100644 --- a/server/apiserver/argoserver.go +++ b/server/apiserver/argoserver.go @@ -34,6 +34,7 @@ import ( eventsourcepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/eventsource" infopkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/info" sensorpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sensor" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive" workflowtemplatepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowtemplate" @@ -51,6 +52,7 @@ import ( "github.com/argoproj/argo-workflows/v3/server/info" "github.com/argoproj/argo-workflows/v3/server/sensor" "github.com/argoproj/argo-workflows/v3/server/static" + "github.com/argoproj/argo-workflows/v3/server/sync" 
"github.com/argoproj/argo-workflows/v3/server/types" "github.com/argoproj/argo-workflows/v3/server/workflow" "github.com/argoproj/argo-workflows/v3/server/workflow/store" @@ -253,12 +255,13 @@ func (as *argoServer) Run(ctx context.Context, port int, browserOpenFunc func(st artifactServer := artifacts.NewArtifactServer(as.gatekeeper, hydrator.New(offloadRepo), wfArchive, instanceIDService, artifactRepositories, log) eventServer := event.NewController(ctx, instanceIDService, eventRecorderManager, as.eventQueueSize, as.eventWorkerCount, as.eventAsyncDispatch) wfArchiveServer := workflowarchive.NewWorkflowArchiveServer(wfArchive, offloadRepo, config.WorkflowDefaults) + syncServer := sync.NewSyncServer() wfStore, err := store.NewSQLiteStore(instanceIDService) if err != nil { log.WithFatal().Error(ctx, err.Error()) } workflowServer := workflow.NewWorkflowServer(ctx, instanceIDService, offloadRepo, wfArchive, as.clients.Workflow, wfStore, wfStore, wftmplStore, cwftmplInformer, config.WorkflowDefaults, &resourceCacheNamespace) - grpcServer := as.newGRPCServer(ctx, instanceIDService, workflowServer, wftmplStore, cwftmplInformer, wfArchiveServer, eventServer, config.Links, config.Columns, config.NavColor, config.WorkflowDefaults) + grpcServer := as.newGRPCServer(ctx, instanceIDService, workflowServer, wftmplStore, cwftmplInformer, wfArchiveServer, syncServer, eventServer, config.Links, config.Columns, config.NavColor, config.WorkflowDefaults) httpServer := as.newHTTPServer(ctx, port, artifactServer) // Start listener @@ -304,7 +307,7 @@ func (as *argoServer) Run(ctx context.Context, port int, browserOpenFunc func(st <-as.stopCh } -func (as *argoServer) newGRPCServer(ctx context.Context, instanceIDService instanceid.Service, workflowServer workflowpkg.WorkflowServiceServer, wftmplStore types.WorkflowTemplateStore, cwftmplStore types.ClusterWorkflowTemplateStore, wfArchiveServer workflowarchivepkg.ArchivedWorkflowServiceServer, 
eventServer *event.Controller, links []*v1alpha1.Link, columns []*v1alpha1.Column, navColor string, wfDefaults *v1alpha1.Workflow) *grpc.Server { +func (as *argoServer) newGRPCServer(ctx context.Context, instanceIDService instanceid.Service, workflowServer workflowpkg.WorkflowServiceServer, wftmplStore types.WorkflowTemplateStore, cwftmplStore types.ClusterWorkflowTemplateStore, wfArchiveServer workflowarchivepkg.ArchivedWorkflowServiceServer, syncServer syncpkg.SyncServiceServer, eventServer *event.Controller, links []*v1alpha1.Link, columns []*v1alpha1.Column, navColor string, wfDefaults *v1alpha1.Workflow) *grpc.Server { serverLog := logging.RequireLoggerFromContext(ctx) // "Prometheus histograms are a great way to measure latency distributions of your RPCs. However, since it is bad practice to have metrics of high cardinality the latency monitoring metrics are disabled by default. To enable them please call the following in your server initialization code:" @@ -347,6 +350,7 @@ func (as *argoServer) newGRPCServer(ctx context.Context, instanceIDService insta cronworkflowpkg.RegisterCronWorkflowServiceServer(grpcServer, cronworkflow.NewCronWorkflowServer(instanceIDService, wftmplStore, cwftmplStore, wfDefaults)) workflowarchivepkg.RegisterArchivedWorkflowServiceServer(grpcServer, wfArchiveServer) clusterwftemplatepkg.RegisterClusterWorkflowTemplateServiceServer(grpcServer, clusterworkflowtemplate.NewClusterWorkflowTemplateServer(instanceIDService, cwftmplStore, wfDefaults)) + syncpkg.RegisterSyncServiceServer(grpcServer, syncServer) grpc_prometheus.Register(grpcServer) return grpcServer } @@ -404,6 +408,7 @@ func (as *argoServer) newHTTPServer(ctx context.Context, port int, artifactServe mustRegisterGWHandler(cronworkflowpkg.RegisterCronWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mustRegisterGWHandler(workflowarchivepkg.RegisterArchivedWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) 
mustRegisterGWHandler(clusterwftemplatepkg.RegisterClusterWorkflowTemplateServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) + mustRegisterGWHandler(syncpkg.RegisterSyncServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mux.HandleFunc("/api/", func(w http.ResponseWriter, r *http.Request) { // we must delete this header for API request to prevent "stream terminated by RST_STREAM with error code: PROTOCOL_ERROR" error diff --git a/server/sync/sync_server.go b/server/sync/sync_server.go new file mode 100644 index 000000000000..e74f88ba1f0f --- /dev/null +++ b/server/sync/sync_server.go @@ -0,0 +1,157 @@ +package sync + +import ( + "context" + "fmt" + "strconv" + + "google.golang.org/grpc/codes" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" + "github.com/argoproj/argo-workflows/v3/server/auth" + sutils "github.com/argoproj/argo-workflows/v3/server/utils" +) + +type syncServer struct { +} + +func NewSyncServer() *syncServer { + return &syncServer{} +} + +func (s *syncServer) CreateSyncLimit(ctx context.Context, req *syncpkg.CreateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) { + if req.SizeLimit <= 0 { + return nil, sutils.ToStatusError(fmt.Errorf("size limit must be greater than zero"), codes.InvalidArgument) + } + + kubeClient := auth.GetKubeClient(ctx) + + configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace) + + cm, err := configmapGetter.Get(ctx, req.Name, metav1.GetOptions{}) + if err == nil { + _, has := cm.Data[req.Key] + if has { + return nil, sutils.ToStatusError(fmt.Errorf("sync limit cannot be created as it already exists"), codes.AlreadyExists) + } + return s.updateSyncLimit(ctx, &syncpkg.UpdateSyncLimitRequest{ + Name: req.Name, + Namespace: req.Namespace, + Key: req.Key, + SizeLimit: req.SizeLimit, + }, false) + } + + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: req.Name, + Namespace: req.Namespace,
+ }, + Data: map[string]string{ + req.Key: fmt.Sprint(req.SizeLimit), + }, + } + + cm, err = configmapGetter.Create(ctx, cm, metav1.CreateOptions{}) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + + return &syncpkg.SyncLimitResponse{ + Name: cm.Name, + Namespace: cm.Namespace, + Key: req.Key, + SizeLimit: req.SizeLimit, + }, nil +} + +func (s *syncServer) GetSyncLimit(ctx context.Context, req *syncpkg.GetSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) { + kubeClient := auth.GetKubeClient(ctx) + + configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace) + + cm, err := configmapGetter.Get(ctx, req.Name, metav1.GetOptions{}) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + + sizeLimit, ok := cm.Data[req.Key] + if !ok { + return nil, sutils.ToStatusError(fmt.Errorf("key %s not found in configmap %s/%s", req.Key, cm.Namespace, cm.Name), codes.NotFound) + } + + parsedSizeLimit, err := strconv.Atoi(sizeLimit) + if err != nil { + return nil, sutils.ToStatusError(fmt.Errorf("invalid size limit format for key %s in configmap %s/%s", req.Key, cm.Namespace, cm.Name), codes.InvalidArgument) + } + + return &syncpkg.SyncLimitResponse{ + Name: cm.Name, + Namespace: cm.Namespace, + Key: req.Key, + SizeLimit: int32(parsedSizeLimit), + }, nil +} + +func (s *syncServer) UpdateSyncLimit(ctx context.Context, req *syncpkg.UpdateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) { + if req.SizeLimit <= 0 { + return nil, sutils.ToStatusError(fmt.Errorf("size limit must be greater than zero"), codes.InvalidArgument) + } + + return s.updateSyncLimit(ctx, req, true) +} + +func (s *syncServer) updateSyncLimit(ctx context.Context, req *syncpkg.UpdateSyncLimitRequest, shouldFieldExist bool) (*syncpkg.SyncLimitResponse, error) { + kubeClient := auth.GetKubeClient(ctx) + + configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace) + + cm, err := configmapGetter.Get(ctx, req.Name, metav1.GetOptions{}) + if err != 
nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + + if cm.Data == nil { + cm.Data = make(map[string]string) + } + + if _, ok := cm.Data[req.Key]; shouldFieldExist && !ok { + return nil, sutils.ToStatusError(fmt.Errorf("key %s not found in configmap %s/%s - please create it first", req.Key, cm.Namespace, cm.Name), codes.NotFound) + } + + cm.Data[req.Key] = strconv.Itoa(int(req.SizeLimit)) + + cm, err = configmapGetter.Update(ctx, cm, metav1.UpdateOptions{}) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + + return &syncpkg.SyncLimitResponse{ + Name: cm.Name, + Namespace: cm.Namespace, + Key: req.Key, + SizeLimit: req.SizeLimit, + }, nil +} + +func (s *syncServer) DeleteSyncLimit(ctx context.Context, req *syncpkg.DeleteSyncLimitRequest) (*syncpkg.DeleteSyncLimitResponse, error) { + kubeClient := auth.GetKubeClient(ctx) + + configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace) + + cm, err := configmapGetter.Get(ctx, req.Name, metav1.GetOptions{}) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + + delete(cm.Data, req.Key) + + _, err = configmapGetter.Update(ctx, cm, metav1.UpdateOptions{}) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) + } + + return &syncpkg.DeleteSyncLimitResponse{}, nil +} diff --git a/server/sync/sync_server_test.go b/server/sync/sync_server_test.go new file mode 100644 index 000000000000..acb042f1e1b9 --- /dev/null +++ b/server/sync/sync_server_test.go @@ -0,0 +1,571 @@ +package sync + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" + + 
syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" + "github.com/argoproj/argo-workflows/v3/server/auth" +) + +func withKubeClient(kubeClient *fake.Clientset) context.Context { + return context.WithValue(context.Background(), auth.KubeKey, kubeClient) +} + +func Test_syncServer_CreateSyncLimit(t *testing.T) { + t.Run("SizeLimit <= 0", func(t *testing.T) { + server := NewSyncServer() + ctx := context.Background() + + req := &syncpkg.CreateSyncLimitRequest{ + Name: "test-cm", + Namespace: "test-ns", + Key: "test-key", + SizeLimit: 0, + } + + _, err := server.CreateSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.InvalidArgument, statusErr.Code()) + require.Contains(t, statusErr.Message(), "size limit must be greater than zero") + }) + + t.Run("Error creating ConfigMap", func(t *testing.T) { + server := NewSyncServer() + kubeClient := fake.NewSimpleClientset() + + kubeClient.PrependReactor("create", "configmaps", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, nil, apierrors.NewForbidden( + schema.GroupResource{Group: "", Resource: "configmaps"}, + "test-cm", + errors.New("namespace not found"), + ) + }) + + ctx := withKubeClient(kubeClient) + + req := &syncpkg.CreateSyncLimitRequest{ + Name: "test-cm", + Namespace: "non-existent-ns", + Key: "test-key", + SizeLimit: 100, + } + + _, err := server.CreateSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.PermissionDenied, statusErr.Code()) + require.Contains(t, statusErr.Message(), "namespace not found") + }) + + t.Run("Create new ConfigMap", func(t *testing.T) { + server := NewSyncServer() + kubeClient := fake.NewSimpleClientset() + ctx := withKubeClient(kubeClient) + + req := &syncpkg.CreateSyncLimitRequest{ + Name: "test-cm", + Namespace:
"test-ns", + Key: "test-key", + SizeLimit: 100, + } + + resp, err := server.CreateSyncLimit(ctx, req) + + require.NoError(t, err) + require.Equal(t, "test-cm", resp.Name) + require.Equal(t, "test-ns", resp.Namespace) + require.Equal(t, "test-key", resp.Key) + require.Equal(t, int32(100), resp.SizeLimit) + }) + + t.Run("ConfigMap already exists", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cm", + Namespace: "test-ns", + }, + Data: map[string]string{ + "existing-key": "50", + }, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.CreateSyncLimitRequest{ + Name: "existing-cm", + Namespace: "test-ns", + Key: "new-key", + SizeLimit: 200, + } + + resp, err := server.CreateSyncLimit(ctx, req) + + require.NoError(t, err) + require.Equal(t, "existing-cm", resp.Name) + require.Equal(t, "test-ns", resp.Namespace) + require.Equal(t, "new-key", resp.Key) + require.Equal(t, int32(200), resp.SizeLimit) + }) + + t.Run("ConfigMap exists with nil Data", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nil-data-cm", + Namespace: "test-ns", + }, + Data: nil, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.CreateSyncLimitRequest{ + Name: "nil-data-cm", + Namespace: "test-ns", + Key: "test-key", + SizeLimit: 300, + } + + resp, err := server.CreateSyncLimit(ctx, req) + + require.NoError(t, err) + require.Equal(t, "nil-data-cm", resp.Name) + require.Equal(t, "test-key", resp.Key) + require.Equal(t, int32(300), resp.SizeLimit) + }) +} + +func Test_syncServer_GetSyncLimit(t *testing.T) { + t.Run("ConfigMap doesn't exist", func(t *testing.T) { + server := NewSyncServer() + kubeClient := fake.NewSimpleClientset() + ctx := withKubeClient(kubeClient) + + req := &syncpkg.GetSyncLimitRequest{ + Name: 
"non-existent-cm", + Namespace: "test-ns", + Key: "test-key", + } + + _, err := server.GetSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, statusErr.Code()) + require.Contains(t, statusErr.Message(), "not found") + }) + + t.Run("Key doesn't exist", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cm", + Namespace: "test-ns", + }, + Data: map[string]string{ + "existing-key": "100", + }, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.GetSyncLimitRequest{ + Name: "existing-cm", + Namespace: "test-ns", + Key: "non-existent-key", + } + + _, err := server.GetSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, statusErr.Code()) + require.Contains(t, statusErr.Message(), "key non-existent-key not found") + }) + + t.Run("Invalid size limit format", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cm", + Namespace: "test-ns", + }, + Data: map[string]string{ + "invalid-key": "not-a-number", + }, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.GetSyncLimitRequest{ + Name: "existing-cm", + Namespace: "test-ns", + Key: "invalid-key", + } + + _, err := server.GetSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.InvalidArgument, statusErr.Code()) + require.Contains(t, statusErr.Message(), "invalid size limit format") + }) + + t.Run("Successfully get sync limit", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cm", + Namespace: 
"test-ns", + }, + Data: map[string]string{ + "valid-key": "500", + }, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.GetSyncLimitRequest{ + Name: "existing-cm", + Namespace: "test-ns", + Key: "valid-key", + } + + resp, err := server.GetSyncLimit(ctx, req) + + require.NoError(t, err) + require.Equal(t, "existing-cm", resp.Name) + require.Equal(t, "test-ns", resp.Namespace) + require.Equal(t, "valid-key", resp.Key) + require.Equal(t, int32(500), resp.SizeLimit) + }) +} + +func Test_syncServer_UpdateSyncLimit(t *testing.T) { + t.Run("SizeLimit <= 0", func(t *testing.T) { + server := NewSyncServer() + ctx := context.Background() + + req := &syncpkg.UpdateSyncLimitRequest{ + Name: "test-cm", + Namespace: "test-ns", + Key: "test-key", + SizeLimit: 0, + } + + _, err := server.UpdateSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.InvalidArgument, statusErr.Code()) + require.Contains(t, statusErr.Message(), "size limit must be greater than zero") + }) + + t.Run("ConfigMap doesn't exist", func(t *testing.T) { + server := NewSyncServer() + kubeClient := fake.NewSimpleClientset() + ctx := withKubeClient(kubeClient) + + req := &syncpkg.UpdateSyncLimitRequest{ + Name: "non-existent-cm", + Namespace: "test-ns", + Key: "test-key", + SizeLimit: 100, + } + + _, err := server.UpdateSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, statusErr.Code()) + require.Contains(t, statusErr.Message(), "not found") + }) + + t.Run("ConfigMap with nil Data", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nil-data-cm", + Namespace: "test-ns", + }, + Data: nil, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := 
&syncpkg.UpdateSyncLimitRequest{ + Name: "nil-data-cm", + Namespace: "test-ns", + Key: "test-key", + SizeLimit: 200, + } + + _, err := server.UpdateSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, statusErr.Code()) + require.Contains(t, statusErr.Message(), "please create it first") + }) + + t.Run("Key doesn't exist", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cm", + Namespace: "test-ns", + }, + Data: map[string]string{ + "existing-key": "100", + }, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.UpdateSyncLimitRequest{ + Name: "existing-cm", + Namespace: "test-ns", + Key: "non-existent-key", + SizeLimit: 200, + } + + _, err := server.UpdateSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, statusErr.Code()) + require.Contains(t, statusErr.Message(), "please create it first") + }) + + t.Run("Error updating ConfigMap", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cm", + Namespace: "test-ns", + }, + Data: map[string]string{ + "existing-key": "100", + }, + } + kubeClient := fake.NewSimpleClientset(existingCM) + + kubeClient.PrependReactor("update", "configmaps", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, nil, errors.New("update error") + }) + + ctx := withKubeClient(kubeClient) + + req := &syncpkg.UpdateSyncLimitRequest{ + Name: "existing-cm", + Namespace: "test-ns", + Key: "existing-key", + SizeLimit: 200, + } + + _, err := server.UpdateSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Internal, statusErr.Code()) + 
require.Contains(t, statusErr.Message(), "update error") + }) + + t.Run("Successfully update sync limit", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cm", + Namespace: "test-ns", + }, + Data: map[string]string{ + "existing-key": "100", + }, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.UpdateSyncLimitRequest{ + Name: "existing-cm", + Namespace: "test-ns", + Key: "existing-key", + SizeLimit: 300, + } + + resp, err := server.UpdateSyncLimit(ctx, req) + + require.NoError(t, err) + require.Equal(t, "existing-cm", resp.Name) + require.Equal(t, "test-ns", resp.Namespace) + require.Equal(t, "existing-key", resp.Key) + require.Equal(t, int32(300), resp.SizeLimit) + }) +} + +func Test_syncServer_DeleteSyncLimit(t *testing.T) { + t.Run("ConfigMap doesn't exist", func(t *testing.T) { + server := NewSyncServer() + kubeClient := fake.NewSimpleClientset() + ctx := withKubeClient(kubeClient) + + req := &syncpkg.DeleteSyncLimitRequest{ + Name: "non-existent-cm", + Namespace: "test-ns", + Key: "test-key", + } + + _, err := server.DeleteSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, statusErr.Code()) + require.Contains(t, statusErr.Message(), "not found") + }) + + t.Run("ConfigMap with nil Data", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nil-data-cm", + Namespace: "test-ns", + }, + Data: nil, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.DeleteSyncLimitRequest{ + Name: "nil-data-cm", + Namespace: "test-ns", + Key: "test-key", + } + + _, err := server.DeleteSyncLimit(ctx, req) + + require.NoError(t, err) + }) + + t.Run("ConfigMap with empty Data", func(t *testing.T) { + server := 
NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-data-cm", + Namespace: "test-ns", + }, + Data: map[string]string{}, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.DeleteSyncLimitRequest{ + Name: "empty-data-cm", + Namespace: "test-ns", + Key: "test-key", + } + + _, err := server.DeleteSyncLimit(ctx, req) + + require.NoError(t, err) + }) + + t.Run("Error updating ConfigMap", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cm", + Namespace: "test-ns", + }, + Data: map[string]string{ + "existing-key": "100", + }, + } + kubeClient := fake.NewSimpleClientset(existingCM) + + kubeClient.PrependReactor("update", "configmaps", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, nil, errors.New("update error") + }) + + ctx := withKubeClient(kubeClient) + + req := &syncpkg.DeleteSyncLimitRequest{ + Name: "existing-cm", + Namespace: "test-ns", + Key: "existing-key", + } + + _, err := server.DeleteSyncLimit(ctx, req) + + require.Error(t, err) + statusErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Internal, statusErr.Code()) + require.Contains(t, statusErr.Message(), "update error") + }) + + t.Run("Successfully delete sync limit", func(t *testing.T) { + server := NewSyncServer() + + existingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-cm", + Namespace: "test-ns", + }, + Data: map[string]string{ + "key1": "100", + "key2": "200", + }, + } + kubeClient := fake.NewSimpleClientset(existingCM) + ctx := withKubeClient(kubeClient) + + req := &syncpkg.DeleteSyncLimitRequest{ + Name: "existing-cm", + Namespace: "test-ns", + Key: "key1", + } + + _, err := server.DeleteSyncLimit(ctx, req) + + require.NoError(t, err) + }) +} diff --git a/test/e2e/argo_server_test.go b/test/e2e/argo_server_test.go index 
80590d43c2ab..84a4fb6d2c80 100644 --- a/test/e2e/argo_server_test.go +++ b/test/e2e/argo_server_test.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync" "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" @@ -2612,6 +2613,106 @@ spec: } +func (s *ArgoServerSuite) TestSyncService() { + syncNamespace := "argo" + configmapName := "test-sync-cm" + syncKey := "test-key" + + s.Run("CreateSyncLimit", func() { + s.e().POST("/api/v1/sync/{namespace}", syncNamespace). + WithJSON(syncpkg.CreateSyncLimitRequest{ + Name: configmapName, + Key: syncKey, + SizeLimit: 100, + }). + Expect(). + Status(200). + JSON().Object(). + HasValue("name", configmapName). + HasValue("key", syncKey). + HasValue("sizeLimit", 100) + }) + + s.Run("CreateSyncLimit-cm-exist", func() { + s.e().POST("/api/v1/sync/{namespace}", syncNamespace). + WithJSON(syncpkg.CreateSyncLimitRequest{ + Name: configmapName, + Key: syncKey + "-exist", + SizeLimit: 100, + }). + Expect(). + Status(200). + JSON().Object(). + HasValue("name", configmapName). + HasValue("key", syncKey+"-exist"). + HasValue("sizeLimit", 100) + }) + + s.Run("GetSyncLimit", func() { + s.e().GET("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey). + WithQuery("name", configmapName). + Expect(). + Status(200). + JSON().Object(). + HasValue("name", configmapName). + HasValue("key", syncKey). + HasValue("sizeLimit", 100) + }) + + s.Run("UpdateSyncLimit", func() { + s.e().PUT("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey). + WithJSON(syncpkg.UpdateSyncLimitRequest{ + Name: configmapName, + SizeLimit: 200, + }). + Expect(). + Status(200). + JSON().Object(). + HasValue("name", configmapName). + HasValue("key", syncKey). 
+ HasValue("sizeLimit", 200) + }) + + s.Run("InvalidSizeLimit", func() { + s.e().POST("/api/v1/sync/{namespace}", syncNamespace). + WithJSON(syncpkg.CreateSyncLimitRequest{ + Name: configmapName + "-invalid", + Key: syncKey, + SizeLimit: 0, + }). + Expect(). + Status(400) + }) + + s.Run("KeyDoesNotExist", func() { + s.e().GET("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey+"-non-existent"). + WithQuery("name", configmapName). + Expect(). + Status(404) + }) + + s.Run("DeleteSyncLimit", func() { + s.e().DELETE("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey). + WithQuery("name", configmapName). + Expect(). + Status(200) + + s.e().GET("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey). + WithQuery("name", configmapName). + Expect(). + Status(404) + }) + + s.Run("UpdateNonExistentLimit", func() { + s.e().PUT("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey+"-non-existent"). + WithJSON(syncpkg.UpdateSyncLimitRequest{ + Name: configmapName, + SizeLimit: 200, + }).Expect(). + Status(404) + }) +} + func TestArgoServerSuite(t *testing.T) { suite.Run(t, new(ArgoServerSuite)) } diff --git a/test/e2e/cli_test.go b/test/e2e/cli_test.go index 3b4384cd46c8..ffbb39320721 100644 --- a/test/e2e/cli_test.go +++ b/test/e2e/cli_test.go @@ -1805,6 +1805,47 @@ func (s *CLISuite) TestArchive() { }) } +func (s *CLISuite) TestSyncCLI() { + s.Given(). + RunCli([]string{"sync", "configmap", "create", "test-sync-configmap", "--key", "test-key", "--size-limit", "1000"}, func(t *testing.T, output string, err error) { + require.NoError(t, err) + assert.Contains(t, output, "Configmap sync limit created") + assert.Contains(t, output, "key test-key") + assert.Contains(t, output, "size limit 1000") + }) + + s.Run("Get ConfigMap sync config", func() { + s.Given(). 
+ RunCli([]string{"sync", "configmap", "get", "test-sync-configmap", "--key", "test-key"}, func(t *testing.T, output string, err error) { + require.NoError(t, err) + assert.Contains(t, output, "Sync Configmap name: test-sync-configmap") + assert.Contains(t, output, "Namespace: argo") + assert.Contains(t, output, "Size Limit: 1000") + }) + }) + + s.Run("Update ConfigMap sync configs", func() { + s.Given(). + RunCli([]string{"sync", "configmap", "update", "test-sync-configmap", "--key", "test-key", "--size-limit", "2000"}, func(t *testing.T, output string, err error) { + require.NoError(t, err) + assert.Contains(t, output, "Updated sync limit for ConfigMap test-sync-configmap") + assert.Contains(t, output, "key test-key") + assert.Contains(t, output, "size limit 2000") + }) + }) + + s.Run("Delete ConfigMap sync config", func() { + s.Given(). + RunCli([]string{"sync", "configmap", "delete", "test-sync-configmap", "--key", "test-key"}, func(t *testing.T, output string, err error) { + require.NoError(t, err) + assert.Contains(t, output, "Deleted sync limit for ConfigMap test-sync-configmap") + assert.Contains(t, output, "argo namespace") + assert.Contains(t, output, "key test-key") + }) + }) + +} + func (s *CLISuite) TestArchiveLabel() { s.Given(). WorkflowTemplate("@smoke/workflow-template-whalesay-template.yaml").