diff --git a/.features/pending/db-sync-cli.md b/.features/pending/db-sync-cli.md
new file mode 100644
index 000000000000..d2a673ade5df
--- /dev/null
+++ b/.features/pending/db-sync-cli.md
@@ -0,0 +1,7 @@
+Description: Add support for creating a database semaphore config using CLI
+Author: [Darko Janjic](https://github.com/djanjic)
+Component: CLI
+Issues: 14783
+
+Allow user to create a database semaphore configuration using CLI
+
diff --git a/.mockery.yaml b/.mockery.yaml
index efd06318eac6..4f41e848a1ad 100644
--- a/.mockery.yaml
+++ b/.mockery.yaml
@@ -31,3 +31,6 @@ packages:
github.com/argoproj/argo-workflows/v3/workflow/sync:
interfaces:
Throttler: {}
+ github.com/argoproj/argo-workflows/v3/util/sync/db:
+ interfaces:
+ SyncQueries: {}
diff --git a/.spelling b/.spelling
index ca6942c81ee8..101c7ddf34f5 100644
--- a/.spelling
+++ b/.spelling
@@ -214,6 +214,7 @@ parameterizing
params
pprof
pre-commit
+programmatically
pytorch
qps
ray
diff --git a/api/jsonschema/schema.json b/api/jsonschema/schema.json
index 5f5c0a07d0f6..3cfc9173d041 100644
--- a/api/jsonschema/schema.json
+++ b/api/jsonschema/schema.json
@@ -11696,18 +11696,18 @@
},
"sync.CreateSyncLimitRequest": {
"properties": {
- "key": {
+ "cmName": {
"type": "string"
},
- "name": {
+ "key": {
"type": "string"
},
+ "limit": {
+ "type": "integer"
+ },
"namespace": {
"type": "string"
},
- "sizeLimit": {
- "type": "integer"
- },
"type": {
"$ref": "#/definitions/sync.SyncConfigType"
}
@@ -11718,27 +11718,27 @@
"type": "object"
},
"sync.SyncConfigType": {
- "default": "CONFIG_MAP",
+ "default": "CONFIGMAP",
"enum": [
- "CONFIG_MAP",
+ "CONFIGMAP",
"DATABASE"
],
"type": "string"
},
"sync.SyncLimitResponse": {
"properties": {
- "key": {
+ "cmName": {
"type": "string"
},
- "name": {
+ "key": {
"type": "string"
},
+ "limit": {
+ "type": "integer"
+ },
"namespace": {
"type": "string"
},
- "sizeLimit": {
- "type": "integer"
- },
"type": {
"$ref": "#/definitions/sync.SyncConfigType"
}
@@ -11747,18 +11747,18 @@
},
"sync.UpdateSyncLimitRequest": {
"properties": {
- "key": {
+ "cmName": {
"type": "string"
},
- "name": {
+ "key": {
"type": "string"
},
+ "limit": {
+ "type": "integer"
+ },
"namespace": {
"type": "string"
},
- "sizeLimit": {
- "type": "integer"
- },
"type": {
"$ref": "#/definitions/sync.SyncConfigType"
}
diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json
index 2ba4f21e7494..10caa24d9c31 100644
--- a/api/openapi-spec/swagger.json
+++ b/api/openapi-spec/swagger.json
@@ -2377,17 +2377,17 @@
},
{
"enum": [
- "CONFIG_MAP",
+ "CONFIGMAP",
"DATABASE"
],
"type": "string",
- "default": "CONFIG_MAP",
+ "default": "CONFIGMAP",
"name": "type",
"in": "query"
},
{
"type": "string",
- "name": "name",
+ "name": "cmName",
"in": "query"
}
],
@@ -2468,17 +2468,17 @@
},
{
"enum": [
- "CONFIG_MAP",
+ "CONFIGMAP",
"DATABASE"
],
"type": "string",
- "default": "CONFIG_MAP",
+ "default": "CONFIGMAP",
"name": "type",
"in": "query"
},
{
"type": "string",
- "name": "name",
+ "name": "cmName",
"in": "query"
}
],
@@ -15950,18 +15950,18 @@
"sync.CreateSyncLimitRequest": {
"type": "object",
"properties": {
- "key": {
+ "cmName": {
"type": "string"
},
- "name": {
+ "key": {
"type": "string"
},
+ "limit": {
+ "type": "integer"
+ },
"namespace": {
"type": "string"
},
- "sizeLimit": {
- "type": "integer"
- },
"type": {
"$ref": "#/definitions/sync.SyncConfigType"
}
@@ -15972,27 +15972,27 @@
},
"sync.SyncConfigType": {
"type": "string",
- "default": "CONFIG_MAP",
+ "default": "CONFIGMAP",
"enum": [
- "CONFIG_MAP",
+ "CONFIGMAP",
"DATABASE"
]
},
"sync.SyncLimitResponse": {
"type": "object",
"properties": {
- "key": {
+ "cmName": {
"type": "string"
},
- "name": {
+ "key": {
"type": "string"
},
+ "limit": {
+ "type": "integer"
+ },
"namespace": {
"type": "string"
},
- "sizeLimit": {
- "type": "integer"
- },
"type": {
"$ref": "#/definitions/sync.SyncConfigType"
}
@@ -16001,18 +16001,18 @@
"sync.UpdateSyncLimitRequest": {
"type": "object",
"properties": {
- "key": {
+ "cmName": {
"type": "string"
},
- "name": {
+ "key": {
"type": "string"
},
+ "limit": {
+ "type": "integer"
+ },
"namespace": {
"type": "string"
},
- "sizeLimit": {
- "type": "integer"
- },
"type": {
"$ref": "#/definitions/sync.SyncConfigType"
}
diff --git a/cmd/argo/commands/sync/configmap/create.go b/cmd/argo/commands/sync/configmap/create.go
deleted file mode 100644
index b91b7f9c9468..000000000000
--- a/cmd/argo/commands/sync/configmap/create.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package sync
-
-import (
- "context"
- "fmt"
-
- "github.com/spf13/cobra"
-
- "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
- syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
- "github.com/argoproj/argo-workflows/v3/util/errors"
-)
-
-type cliCreateOpts struct {
- key string // --key
- sizeLimit int32 // --size-limit
-}
-
-func NewCreateCommand() *cobra.Command {
-
- var cliCreateOpts = cliCreateOpts{}
-
- command := &cobra.Command{
- Use: "create",
- Short: "Create a configmap sync limit",
- Args: cobra.ExactArgs(1),
- Example: `argo sync configmap create my-cm --key my-key --size-limit 10`,
- RunE: func(cmd *cobra.Command, args []string) error {
- return CreateSyncLimitCommand(cmd.Context(), args[0], &cliCreateOpts)
- },
- }
-
- command.Flags().StringVar(&cliCreateOpts.key, "key", "", "Key of the sync limit")
- command.Flags().Int32Var(&cliCreateOpts.sizeLimit, "size-limit", 0, "Size limit of the sync limit")
-
- ctx := command.Context()
- err := command.MarkFlagRequired("key")
- errors.CheckError(ctx, err)
-
- err = command.MarkFlagRequired("size-limit")
- errors.CheckError(ctx, err)
-
- return command
-}
-
-func CreateSyncLimitCommand(ctx context.Context, cmName string, cliOpts *cliCreateOpts) error {
- ctx, apiClient, err := client.NewAPIClient(ctx)
- if err != nil {
- return err
- }
- serviceClient, err := apiClient.NewSyncServiceClient()
- if err != nil {
- return err
- }
-
- req := &syncpkg.CreateSyncLimitRequest{
- Name: cmName,
- Namespace: client.Namespace(ctx),
- Key: cliOpts.key,
- SizeLimit: cliOpts.sizeLimit,
- Type: syncpkg.SyncConfigType_CONFIG_MAP,
- }
-
- resp, err := serviceClient.CreateSyncLimit(ctx, req)
- if err != nil {
- return fmt.Errorf("failed to create sync limit: %v", err)
- }
-
- fmt.Printf("Configmap sync limit created: %s/%s with key %s and size limit %d\n", resp.Namespace, resp.Name, resp.Key, resp.SizeLimit)
-
- return nil
-}
diff --git a/cmd/argo/commands/sync/configmap/delete.go b/cmd/argo/commands/sync/configmap/delete.go
deleted file mode 100644
index d01085a199ae..000000000000
--- a/cmd/argo/commands/sync/configmap/delete.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package sync
-
-import (
- "context"
- "fmt"
-
- "github.com/spf13/cobra"
-
- "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
- syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
- "github.com/argoproj/argo-workflows/v3/util/errors"
-)
-
-type cliDeleteOpts struct {
- key string // --key
-}
-
-func NewDeleteCommand() *cobra.Command {
- var cliDeleteOpts = cliDeleteOpts{}
-
- command := &cobra.Command{
- Use: "delete",
- Short: "Delete a configmap sync limit",
- Args: cobra.ExactArgs(1),
- Example: `argo sync configmap delete my-cm --key my-key`,
- RunE: func(cmd *cobra.Command, args []string) error {
- return DeleteSyncLimitCommand(cmd.Context(), args[0], &cliDeleteOpts)
- },
- }
-
- command.Flags().StringVar(&cliDeleteOpts.key, "key", "", "Key of the sync limit")
-
- err := command.MarkFlagRequired("key")
- errors.CheckError(command.Context(), err)
-
- return command
-}
-
-func DeleteSyncLimitCommand(ctx context.Context, cmName string, cliDeleteOpts *cliDeleteOpts) error {
- ctx, apiClient, err := client.NewAPIClient(ctx)
- if err != nil {
- return err
- }
- serviceClient, err := apiClient.NewSyncServiceClient()
- if err != nil {
- return err
- }
-
- namespace := client.Namespace(ctx)
- req := &syncpkg.DeleteSyncLimitRequest{
- Name: cmName,
- Namespace: namespace,
- Key: cliDeleteOpts.key,
- Type: syncpkg.SyncConfigType_CONFIG_MAP,
- }
-
- if _, err := serviceClient.DeleteSyncLimit(ctx, req); err != nil {
- return fmt.Errorf("failed to delete sync limit: %v", err)
- }
-
- fmt.Printf("Deleted sync limit for ConfigMap %s from %s namespace with key %s\n", cmName, namespace, cliDeleteOpts.key)
- return nil
-}
diff --git a/cmd/argo/commands/sync/configmap/get.go b/cmd/argo/commands/sync/configmap/get.go
deleted file mode 100644
index fbef4c21e8b9..000000000000
--- a/cmd/argo/commands/sync/configmap/get.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package sync
-
-import (
- "context"
- "fmt"
-
- "github.com/spf13/cobra"
-
- "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
- syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
- "github.com/argoproj/argo-workflows/v3/util/errors"
-)
-
-type cliGetOpts struct {
- key string // --key
-}
-
-func NewGetCommand() *cobra.Command {
- var cliGetOpts = cliGetOpts{}
- command := &cobra.Command{
- Use: "get",
- Short: "Get a configmap sync limit",
- Args: cobra.ExactArgs(1),
- Example: `argo sync configmap get my-cm --key my-key`,
- RunE: func(cmd *cobra.Command, args []string) error {
- return GetSyncLimitCommand(cmd.Context(), args[0], &cliGetOpts)
- },
- }
-
- command.Flags().StringVar(&cliGetOpts.key, "key", "", "Key of the sync limit")
-
- err := command.MarkFlagRequired("key")
- errors.CheckError(command.Context(), err)
-
- return command
-}
-
-func GetSyncLimitCommand(ctx context.Context, cmName string, cliGetOpts *cliGetOpts) error {
- ctx, apiClient, err := client.NewAPIClient(ctx)
- if err != nil {
- return err
- }
- serviceClient, err := apiClient.NewSyncServiceClient()
- if err != nil {
- return err
- }
-
- req := &syncpkg.GetSyncLimitRequest{
- Name: cmName,
- Namespace: client.Namespace(ctx),
- Key: cliGetOpts.key,
- Type: syncpkg.SyncConfigType_CONFIG_MAP,
- }
-
- resp, err := serviceClient.GetSyncLimit(ctx, req)
- if err != nil {
- return fmt.Errorf("failed to get sync limit: %v", err)
- }
-
- fmt.Printf("Sync Configmap name: %s\nNamespace: %s\nKey: %s\nSize Limit: %d\n", resp.Name, resp.Namespace, resp.Key, resp.SizeLimit)
- return nil
-}
diff --git a/cmd/argo/commands/sync/configmap/root.go b/cmd/argo/commands/sync/configmap/root.go
deleted file mode 100644
index 22c59caae2ec..000000000000
--- a/cmd/argo/commands/sync/configmap/root.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package sync
-
-import (
- "github.com/spf13/cobra"
-)
-
-func NewConfigmapCommand() *cobra.Command {
- command := &cobra.Command{
- Use: "configmap",
- Aliases: []string{"cm"},
- Short: "manage configmap sync limits",
- RunE: func(cmd *cobra.Command, args []string) error {
- return cmd.Help()
- },
- }
-
- command.AddCommand(NewCreateCommand())
- command.AddCommand(NewGetCommand())
- command.AddCommand(NewDeleteCommand())
- command.AddCommand(NewUpdateCommand())
-
- return command
-}
diff --git a/cmd/argo/commands/sync/configmap/update.go b/cmd/argo/commands/sync/configmap/update.go
deleted file mode 100644
index 75ed341fccb3..000000000000
--- a/cmd/argo/commands/sync/configmap/update.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package sync
-
-import (
- "context"
- "fmt"
-
- "github.com/spf13/cobra"
-
- "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
- syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
- "github.com/argoproj/argo-workflows/v3/util/errors"
-)
-
-type cliUpdateOpts struct {
- key string // --key
- sizeLimit int32 // --size-limit
-}
-
-func NewUpdateCommand() *cobra.Command {
- var cliUpdateOpts = cliUpdateOpts{}
-
- command := &cobra.Command{
- Use: "update",
- Short: "Update a configmap sync limit",
- Args: cobra.ExactArgs(1),
- Example: `argo sync configmap update my-cm --key my-key --size-limit 20`,
- RunE: func(cmd *cobra.Command, args []string) error {
- return UpdateSyncLimitCommand(cmd.Context(), args[0], &cliUpdateOpts)
- },
- }
-
- command.Flags().StringVar(&cliUpdateOpts.key, "key", "", "Key of the sync limit")
- command.Flags().Int32Var(&cliUpdateOpts.sizeLimit, "size-limit", 0, "Size limit of the sync limit")
-
- ctx := command.Context()
- err := command.MarkFlagRequired("key")
- errors.CheckError(ctx, err)
-
- err = command.MarkFlagRequired("size-limit")
- errors.CheckError(ctx, err)
-
- return command
-}
-
-func UpdateSyncLimitCommand(ctx context.Context, cmName string, cliOpts *cliUpdateOpts) error {
- ctx, apiClient, err := client.NewAPIClient(ctx)
- if err != nil {
- return err
- }
- serviceClient, err := apiClient.NewSyncServiceClient()
- if err != nil {
- return err
- }
-
- req := &syncpkg.UpdateSyncLimitRequest{
- Name: cmName,
- Namespace: client.Namespace(ctx),
- Key: cliOpts.key,
- SizeLimit: cliOpts.sizeLimit,
- Type: syncpkg.SyncConfigType_CONFIG_MAP,
- }
-
- resp, err := serviceClient.UpdateSyncLimit(ctx, req)
- if err != nil {
- return fmt.Errorf("failed to update sync limit: %v", err)
- }
-
- fmt.Printf("Updated sync limit for ConfigMap %s from namespace %s with key %s to size limit %d\n", resp.Name, resp.Namespace, resp.Key, resp.SizeLimit)
- return nil
-}
diff --git a/cmd/argo/commands/sync/create.go b/cmd/argo/commands/sync/create.go
new file mode 100644
index 000000000000..c4ceac4ce183
--- /dev/null
+++ b/cmd/argo/commands/sync/create.go
@@ -0,0 +1,88 @@
+package sync
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
+ syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
+ "github.com/argoproj/argo-workflows/v3/util/errors"
+)
+
+type cliCreateOpts struct {
+ limit int32 // --limit
+ syncType string // --type
+ cmName string // --cm-name
+}
+
+func NewCreateCommand() *cobra.Command {
+
+ var cliCreateOpts = cliCreateOpts{}
+
+ command := &cobra.Command{
+ Use: "create",
+ Short: "Create a sync limit",
+ Args: cobra.ExactArgs(1),
+ Example: `
+# Create a database sync limit:
+ argo sync create my-key --type database --limit 10
+
+# Create a configmap sync limit:
+ argo sync create my-key --type configmap --cm-name my-configmap --limit 10
+`,
+
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ cliCreateOpts.syncType = strings.ToUpper(cliCreateOpts.syncType)
+ return validateFlags(cliCreateOpts.syncType, cliCreateOpts.cmName)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return CreateSyncLimitCommand(cmd.Context(), args[0], &cliCreateOpts)
+ },
+ }
+
+ command.Flags().Int32Var(&cliCreateOpts.limit, "limit", 0, "Sync limit")
+ command.Flags().StringVar(&cliCreateOpts.syncType, "type", "", "Type of sync limit (database or configmap)")
+ command.Flags().StringVar(&cliCreateOpts.cmName, "cm-name", "", "ConfigMap name (required if type is configmap)")
+
+ ctx := command.Context()
+
+ err := command.MarkFlagRequired("limit")
+ errors.CheckError(ctx, err)
+
+ err = command.MarkFlagRequired("type")
+ errors.CheckError(ctx, err)
+
+ return command
+}
+
+func CreateSyncLimitCommand(ctx context.Context, key string, cliOpts *cliCreateOpts) error {
+ ctx, apiClient, err := client.NewAPIClient(ctx)
+ if err != nil {
+ return err
+ }
+ serviceClient, err := apiClient.NewSyncServiceClient(ctx)
+ if err != nil {
+ return err
+ }
+
+ req := &syncpkg.CreateSyncLimitRequest{
+ CmName: cliOpts.cmName,
+ Namespace: client.Namespace(ctx),
+ Key: key,
+ Limit: cliOpts.limit,
+ Type: syncpkg.SyncConfigType(syncpkg.SyncConfigType_value[cliOpts.syncType]),
+ }
+
+ resp, err := serviceClient.CreateSyncLimit(ctx, req)
+ if err != nil {
+ return fmt.Errorf("failed to create sync limit: %v", err)
+ }
+
+ fmt.Printf("Sync limit created\n")
+ printSyncLimit(resp.Key, resp.CmName, resp.Namespace, resp.Limit, resp.Type)
+
+ return nil
+}
diff --git a/cmd/argo/commands/sync/delete.go b/cmd/argo/commands/sync/delete.go
new file mode 100644
index 000000000000..b67962120dd1
--- /dev/null
+++ b/cmd/argo/commands/sync/delete.go
@@ -0,0 +1,77 @@
+package sync
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
+ syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
+ "github.com/argoproj/argo-workflows/v3/util/errors"
+)
+
+type cliDeleteOpts struct {
+ syncType string // --type
+ cmName string // --cm-name
+}
+
+func NewDeleteCommand() *cobra.Command {
+ var cliDeleteOpts = cliDeleteOpts{}
+
+ command := &cobra.Command{
+ Use: "delete",
+ Short: "Delete a sync limit",
+ Args: cobra.ExactArgs(1),
+ Example: `
+# Delete a database sync limit
+ argo sync delete my-key --type database
+
+# Delete a configmap sync limit
+ argo sync delete my-key --type configmap --cm-name my-configmap
+`,
+
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ cliDeleteOpts.syncType = strings.ToUpper(cliDeleteOpts.syncType)
+ return validateFlags(cliDeleteOpts.syncType, cliDeleteOpts.cmName)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return DeleteSyncLimitCommand(cmd.Context(), args[0], &cliDeleteOpts)
+ },
+ }
+
+ command.Flags().StringVar(&cliDeleteOpts.syncType, "type", "", "Type of sync limit (database or configmap)")
+ command.Flags().StringVar(&cliDeleteOpts.cmName, "cm-name", "", "ConfigMap name (required if type is configmap)")
+
+ err := command.MarkFlagRequired("type")
+ errors.CheckError(command.Context(), err)
+
+ return command
+}
+
+func DeleteSyncLimitCommand(ctx context.Context, key string, cliDeleteOpts *cliDeleteOpts) error {
+ ctx, apiClient, err := client.NewAPIClient(ctx)
+ if err != nil {
+ return err
+ }
+ serviceClient, err := apiClient.NewSyncServiceClient(ctx)
+ if err != nil {
+ return err
+ }
+
+ namespace := client.Namespace(ctx)
+ req := &syncpkg.DeleteSyncLimitRequest{
+ CmName: cliDeleteOpts.cmName,
+ Namespace: namespace,
+ Key: key,
+ Type: syncpkg.SyncConfigType(syncpkg.SyncConfigType_value[cliDeleteOpts.syncType]),
+ }
+
+ if _, err := serviceClient.DeleteSyncLimit(ctx, req); err != nil {
+ return fmt.Errorf("failed to delete sync limit: %v", err)
+ }
+
+ fmt.Printf("Sync limit deleted\n")
+ return nil
+}
diff --git a/cmd/argo/commands/sync/get.go b/cmd/argo/commands/sync/get.go
new file mode 100644
index 000000000000..1a65b85e2ba8
--- /dev/null
+++ b/cmd/argo/commands/sync/get.go
@@ -0,0 +1,76 @@
+package sync
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
+ syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
+ "github.com/argoproj/argo-workflows/v3/util/errors"
+)
+
+type cliGetOpts struct {
+ syncType string // --type
+ cmName string // --cm-name
+}
+
+func NewGetCommand() *cobra.Command {
+ var cliGetOpts = cliGetOpts{}
+ command := &cobra.Command{
+ Use: "get",
+ Short: "Get a sync limit",
+ Args: cobra.ExactArgs(1),
+ Example: `
+# Get a database sync limit
+ argo sync get my-key --type database
+
+# Get a configmap sync limit
+ argo sync get my-key --type configmap --cm-name my-configmap
+`,
+
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ cliGetOpts.syncType = strings.ToUpper(cliGetOpts.syncType)
+ return validateFlags(cliGetOpts.syncType, cliGetOpts.cmName)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return GetSyncLimitCommand(cmd.Context(), args[0], &cliGetOpts)
+ },
+ }
+
+ command.Flags().StringVar(&cliGetOpts.syncType, "type", "", "Type of sync limit (database or configmap)")
+ command.Flags().StringVar(&cliGetOpts.cmName, "cm-name", "", "ConfigMap name (required if type is configmap)")
+
+ err := command.MarkFlagRequired("type")
+ errors.CheckError(command.Context(), err)
+
+ return command
+}
+
+func GetSyncLimitCommand(ctx context.Context, key string, cliGetOpts *cliGetOpts) error {
+ ctx, apiClient, err := client.NewAPIClient(ctx)
+ if err != nil {
+ return err
+ }
+ serviceClient, err := apiClient.NewSyncServiceClient(ctx)
+ if err != nil {
+ return err
+ }
+
+ req := &syncpkg.GetSyncLimitRequest{
+ CmName: cliGetOpts.cmName,
+ Namespace: client.Namespace(ctx),
+ Key: key,
+ Type: syncpkg.SyncConfigType(syncpkg.SyncConfigType_value[cliGetOpts.syncType]),
+ }
+
+ resp, err := serviceClient.GetSyncLimit(ctx, req)
+ if err != nil {
+ return fmt.Errorf("failed to get sync limit: %v", err)
+ }
+
+ printSyncLimit(resp.Key, resp.CmName, resp.Namespace, resp.Limit, resp.Type)
+ return nil
+}
diff --git a/cmd/argo/commands/sync/root.go b/cmd/argo/commands/sync/root.go
index a2441bff05de..d83cb114dd1c 100644
--- a/cmd/argo/commands/sync/root.go
+++ b/cmd/argo/commands/sync/root.go
@@ -2,8 +2,6 @@ package sync
import (
"github.com/spf13/cobra"
-
- configmap "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/sync/configmap"
)
func NewSyncCommand() *cobra.Command {
@@ -15,7 +13,10 @@ func NewSyncCommand() *cobra.Command {
},
}
- command.AddCommand(configmap.NewConfigmapCommand())
+ command.AddCommand(NewCreateCommand())
+ command.AddCommand(NewUpdateCommand())
+ command.AddCommand(NewDeleteCommand())
+ command.AddCommand(NewGetCommand())
return command
}
diff --git a/cmd/argo/commands/sync/update.go b/cmd/argo/commands/sync/update.go
new file mode 100644
index 000000000000..b550d4d23a3f
--- /dev/null
+++ b/cmd/argo/commands/sync/update.go
@@ -0,0 +1,85 @@
+package sync
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
+ syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
+ "github.com/argoproj/argo-workflows/v3/util/errors"
+)
+
+type cliUpdateOpts struct {
+ limit int32 // --limit
+ syncType string // --type
+ cmName string // --cm-name
+}
+
+func NewUpdateCommand() *cobra.Command {
+ var cliUpdateOpts = cliUpdateOpts{}
+
+ command := &cobra.Command{
+ Use: "update",
+		Short:   "Update a sync limit",
+ Args: cobra.ExactArgs(1),
+ Example: `
+# Update a database sync limit
+  argo sync update my-key --type database --limit 20
+
+# Update a configmap sync limit
+  argo sync update my-key --type configmap --cm-name my-configmap --limit 20
+`,
+
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ cliUpdateOpts.syncType = strings.ToUpper(cliUpdateOpts.syncType)
+ return validateFlags(cliUpdateOpts.syncType, cliUpdateOpts.cmName)
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return UpdateSyncLimitCommand(cmd.Context(), args[0], &cliUpdateOpts)
+ },
+ }
+
+ command.Flags().StringVar(&cliUpdateOpts.cmName, "cm-name", "", "ConfigMap name (required if type is configmap)")
+ command.Flags().Int32Var(&cliUpdateOpts.limit, "limit", 0, "Limit of the sync limit")
+ command.Flags().StringVar(&cliUpdateOpts.syncType, "type", "", "Type of sync limit (database or configmap)")
+
+ ctx := command.Context()
+ err := command.MarkFlagRequired("type")
+ errors.CheckError(ctx, err)
+
+ err = command.MarkFlagRequired("limit")
+ errors.CheckError(ctx, err)
+
+ return command
+}
+
+func UpdateSyncLimitCommand(ctx context.Context, key string, cliOpts *cliUpdateOpts) error {
+ ctx, apiClient, err := client.NewAPIClient(ctx)
+ if err != nil {
+ return err
+ }
+ serviceClient, err := apiClient.NewSyncServiceClient(ctx)
+ if err != nil {
+ return err
+ }
+
+ req := &syncpkg.UpdateSyncLimitRequest{
+ CmName: cliOpts.cmName,
+ Namespace: client.Namespace(ctx),
+ Key: key,
+ Limit: cliOpts.limit,
+ Type: syncpkg.SyncConfigType(syncpkg.SyncConfigType_value[cliOpts.syncType]),
+ }
+
+ resp, err := serviceClient.UpdateSyncLimit(ctx, req)
+ if err != nil {
+ return fmt.Errorf("failed to update sync limit: %v", err)
+ }
+
+ fmt.Printf("Sync limit updated\n")
+ printSyncLimit(resp.Key, resp.CmName, resp.Namespace, resp.Limit, resp.Type)
+ return nil
+}
diff --git a/cmd/argo/commands/sync/util.go b/cmd/argo/commands/sync/util.go
new file mode 100644
index 000000000000..706bf9ef6443
--- /dev/null
+++ b/cmd/argo/commands/sync/util.go
@@ -0,0 +1,30 @@
+package sync
+
+import (
+ "fmt"
+ "strings"
+
+ syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
+)
+
+func validateFlags(syncType, cmName string) error {
+ if _, ok := syncpkg.SyncConfigType_value[syncType]; !ok {
+ return fmt.Errorf("--type must be either 'database' or 'configmap'")
+ }
+
+ if syncType == syncpkg.SyncConfigType_CONFIGMAP.String() && cmName == "" {
+ return fmt.Errorf("--cm-name is required when type is configmap")
+ }
+
+ return nil
+}
+
+func printSyncLimit(key, cmName, namespace string, limit int32, syncType syncpkg.SyncConfigType) {
+ fmt.Printf("Key: %s\n", key)
+ fmt.Printf("Type: %s\n", strings.ToLower(syncType.String()))
+ if syncType == syncpkg.SyncConfigType_CONFIGMAP {
+ fmt.Printf("ConfigMap Name: %s\n", cmName)
+ }
+ fmt.Printf("Namespace: %s\n", namespace)
+ fmt.Printf("Limit: %d\n", limit)
+}
diff --git a/cmd/argo/commands/sync/util_test.go b/cmd/argo/commands/sync/util_test.go
new file mode 100644
index 000000000000..76fd25a6ff50
--- /dev/null
+++ b/cmd/argo/commands/sync/util_test.go
@@ -0,0 +1,23 @@
+package sync
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestValidateFlags(t *testing.T) {
+ err := validateFlags("DATABASE", "")
+ require.NoError(t, err)
+
+ err = validateFlags("CONFIGMAP", "my-cm")
+ require.NoError(t, err)
+
+ err = validateFlags("INVALID", "")
+ require.Error(t, err)
+ require.Equal(t, "--type must be either 'database' or 'configmap'", err.Error())
+
+ err = validateFlags("CONFIGMAP", "")
+ require.Error(t, err)
+ require.Equal(t, "--cm-name is required when type is configmap", err.Error())
+}
diff --git a/config/config.go b/config/config.go
index 338ee78262ea..c30bab83c88c 100644
--- a/config/config.go
+++ b/config/config.go
@@ -238,6 +238,8 @@ func (c PersistConfig) GetClusterName() string {
// SyncConfig contains synchronization configuration for database locks (semaphores and mutexes)
type SyncConfig struct {
DBConfig
+ // EnableAPI enables the database synchronization API
+ EnableAPI bool `json:"enableAPI,omitempty"`
// ControllerName sets a unique name for this controller instance
ControllerName string `json:"controllerName"`
// SkipMigration skips database migration if needed
diff --git a/docs/cli/argo_sync.md b/docs/cli/argo_sync.md
index d3e0d99a9717..a12a582f75ae 100644
--- a/docs/cli/argo_sync.md
+++ b/docs/cli/argo_sync.md
@@ -51,5 +51,8 @@ argo sync [flags]
### SEE ALSO
* [argo](argo.md) - argo is the command line interface to Argo
-* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits
+* [argo sync create](argo_sync_create.md) - Create a sync limit
+* [argo sync delete](argo_sync_delete.md) - Delete a sync limit
+* [argo sync get](argo_sync_get.md) - Get a sync limit
+* [argo sync update](argo_sync_update.md)	 - Update a sync limit
diff --git a/docs/cli/argo_sync_configmap_get.md b/docs/cli/argo_sync_configmap_get.md
deleted file mode 100644
index be7666b0a10b..000000000000
--- a/docs/cli/argo_sync_configmap_get.md
+++ /dev/null
@@ -1,61 +0,0 @@
-## argo sync configmap get
-
-Get a configmap sync limit
-
-```
-argo sync configmap get [flags]
-```
-
-### Examples
-
-```
-argo sync configmap get my-cm --key my-key
-```
-
-### Options
-
-```
- -h, --help help for get
- --key string Key of the sync limit
-```
-
-### Options inherited from parent commands
-
-```
- --argo-base-href string Path to use with HTTP client due to Base HREF. Defaults to the ARGO_BASE_HREF environment variable.
- --argo-http1 If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.
- -s, --argo-server host:port API server host:port. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.
- --as string Username to impersonate for the operation
- --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
- --as-uid string UID to impersonate for the operation
- --certificate-authority string Path to a cert file for the certificate authority
- --client-certificate string Path to a client certificate file for TLS
- --client-key string Path to a client key file for TLS
- --cluster string The name of the kubeconfig cluster to use
- --context string The name of the kubeconfig context to use
- --disable-compression If true, opt-out of response compression for all requests to the server
- --gloglevel int Set the glog logging level
- -H, --header strings Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.
- --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
- -k, --insecure-skip-verify If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.
- --instanceid string submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.
- --kubeconfig string Path to a kube config. Only required if out-of-cluster
- --log-format string The formatter to use for logs. One of: text|json (default "text")
- --loglevel string Set the logging level. One of: debug|info|warn|error (default "info")
- -n, --namespace string If present, the namespace scope for this CLI request
- --password string Password for basic authentication to the API server
- --proxy-url string If provided, this URL will be used to connect via proxy
- --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
- -e, --secure Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable. (default true)
- --server string The address and port of the Kubernetes API server
- --tls-server-name string If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used.
- --token string Bearer token for authentication to the API server
- --user string The name of the kubeconfig user to use
- --username string Username for basic authentication to the API server
- -v, --verbose Enabled verbose logging, i.e. --loglevel debug
-```
-
-### SEE ALSO
-
-* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits
-
diff --git a/docs/cli/argo_sync_configmap.md b/docs/cli/argo_sync_create.md
similarity index 88%
rename from docs/cli/argo_sync_configmap.md
rename to docs/cli/argo_sync_create.md
index 9ca9e7ceb5b3..75e49113dc48 100644
--- a/docs/cli/argo_sync_configmap.md
+++ b/docs/cli/argo_sync_create.md
@@ -1,15 +1,30 @@
-## argo sync configmap
+## argo sync create
-manage configmap sync limits
+Create a sync limit
```
-argo sync configmap [flags]
+argo sync create [flags]
+```
+
+### Examples
+
+```
+
+# Create a database sync limit:
+ argo sync create my-key --type database --limit 10
+
+# Create a configmap sync limit:
+ argo sync create my-key --type configmap --cm-name my-configmap --limit 10
+
```
### Options
```
- -h, --help help for configmap
+ --cm-name string ConfigMap name (required if type is configmap)
+ -h, --help help for create
+ --limit int32 Sync limit
+ --type string Type of sync limit (database or configmap)
```
### Options inherited from parent commands
@@ -51,8 +66,4 @@ argo sync configmap [flags]
### SEE ALSO
* [argo sync](argo_sync.md) - manage sync limits
-* [argo sync configmap create](argo_sync_configmap_create.md) - Create a configmap sync limit
-* [argo sync configmap delete](argo_sync_configmap_delete.md) - Delete a configmap sync limit
-* [argo sync configmap get](argo_sync_configmap_get.md) - Get a configmap sync limit
-* [argo sync configmap update](argo_sync_configmap_update.md) - Update a configmap sync limit
diff --git a/docs/cli/argo_sync_configmap_create.md b/docs/cli/argo_sync_delete.md
similarity index 88%
rename from docs/cli/argo_sync_configmap_create.md
rename to docs/cli/argo_sync_delete.md
index 185b5e2ef61a..40f4272be281 100644
--- a/docs/cli/argo_sync_configmap_create.md
+++ b/docs/cli/argo_sync_delete.md
@@ -1,23 +1,29 @@
-## argo sync configmap create
+## argo sync delete
-Create a configmap sync limit
+Delete a sync limit
```
-argo sync configmap create [flags]
+argo sync delete [flags]
```
### Examples
```
-argo sync configmap create my-cm --key my-key --size-limit 10
+
+# Delete a database sync limit
+ argo sync delete my-key --type database
+
+# Delete a configmap sync limit
+ argo sync delete my-key --type configmap --cm-name my-configmap
+
```
### Options
```
- -h, --help help for create
- --key string Key of the sync limit
- --size-limit int32 Size limit of the sync limit
+ --cm-name string ConfigMap name (required if type is configmap)
+ -h, --help help for delete
+ --type string Type of sync limit (database or configmap)
```
### Options inherited from parent commands
@@ -58,5 +64,5 @@ argo sync configmap create my-cm --key my-key --size-limit 10
### SEE ALSO
-* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits
+* [argo sync](argo_sync.md) - manage sync limits
diff --git a/docs/cli/argo_sync_configmap_update.md b/docs/cli/argo_sync_get.md
similarity index 89%
rename from docs/cli/argo_sync_configmap_update.md
rename to docs/cli/argo_sync_get.md
index d7370ccab3d7..fc2b11d61bc2 100644
--- a/docs/cli/argo_sync_configmap_update.md
+++ b/docs/cli/argo_sync_get.md
@@ -1,23 +1,29 @@
-## argo sync configmap update
+## argo sync get
-Update a configmap sync limit
+Get a sync limit
```
-argo sync configmap update [flags]
+argo sync get [flags]
```
### Examples
```
-argo sync configmap update my-cm --key my-key --size-limit 20
+
+# Get a database sync limit
+ argo sync get my-key --type database
+
+# Get a configmap sync limit
+ argo sync get my-key --type configmap --cm-name my-configmap
+
```
### Options
```
- -h, --help help for update
- --key string Key of the sync limit
- --size-limit int32 Size limit of the sync limit
+ --cm-name string ConfigMap name (required if type is configmap)
+ -h, --help help for get
+ --type string Type of sync limit (database or configmap)
```
### Options inherited from parent commands
@@ -58,5 +64,5 @@ argo sync configmap update my-cm --key my-key --size-limit 20
### SEE ALSO
-* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits
+* [argo sync](argo_sync.md) - manage sync limits
diff --git a/docs/cli/argo_sync_configmap_delete.md b/docs/cli/argo_sync_update.md
similarity index 87%
rename from docs/cli/argo_sync_configmap_delete.md
rename to docs/cli/argo_sync_update.md
index 8b24809ffadd..d09f8ea79fe4 100644
--- a/docs/cli/argo_sync_configmap_delete.md
+++ b/docs/cli/argo_sync_update.md
@@ -1,22 +1,30 @@
-## argo sync configmap delete
+## argo sync update
-Delete a configmap sync limit
+Update a sync limit
```
-argo sync configmap delete [flags]
+argo sync update [flags]
```
### Examples
```
-argo sync configmap delete my-cm --key my-key
+
+# Update a database sync limit
+  argo sync update my-key --type database --limit 20
+
+# Update a configmap sync limit
+  argo sync update my-key --type configmap --cm-name my-configmap --limit 20
+
```
### Options
```
- -h, --help help for delete
- --key string Key of the sync limit
+ --cm-name string ConfigMap name (required if type is configmap)
+ -h, --help help for update
+      --limit int32      Sync limit
+ --type string Type of sync limit (database or configmap)
```
### Options inherited from parent commands
@@ -57,5 +65,5 @@ argo sync configmap delete my-cm --key my-key
### SEE ALSO
-* [argo sync configmap](argo_sync_configmap.md) - manage configmap sync limits
+* [argo sync](argo_sync.md) - manage sync limits
diff --git a/docs/synchronization-config.md b/docs/synchronization-config.md
new file mode 100644
index 000000000000..2272e222824e
--- /dev/null
+++ b/docs/synchronization-config.md
@@ -0,0 +1,89 @@
+# Managing Synchronization Limits via API
+
+This page explains how to manage synchronization limits (semaphores) using the Argo Server API and CLI.
+
+## Overview
+
+Argo Workflows provides two ways to configure synchronization limits:
+
+1. **ConfigMap-based limits** (always available) - Define limits in Kubernetes ConfigMaps
+2. **Database-based limits** (requires configuration) - Store limits in a shared database for cross-cluster synchronization
+Use the API/CLI approach when you need to:
+
+- Adjust semaphore limits dynamically without redeploying ConfigMaps
+- Manage limits programmatically or from CI/CD pipelines
+- Provision synchronization limits as part of automated infrastructure setup
+For CLI usage, refer to the [CLI documentation](cli/argo_sync.md).
+For API specifications, see the [Swagger documentation](swagger.md).
+
+## ConfigMap-based Limits
+
+ConfigMap-based limits are always available through the API and CLI.
+No additional configuration is required.
+The API allows administrators to create, read, update, and delete semaphore configurations stored in ConfigMaps without manually editing YAML files.
+This is controlled via standard Kubernetes RBAC.
+
+## Database-based Limits
+
+Database-based limits allow multiple workflow controllers (typically across different clusters) to share synchronization state.
+
+### Prerequisites
+
+Before you can manage database limits via the API, you must:
+
+1. Configure a PostgreSQL or MySQL database for synchronization (see [workflow synchronization](synchronization.md#database-configuration))
+2. Enable the synchronization API in your workflow controller configuration
+
+### Enable the API
+
+Add this configuration to your [workflow-controller-configmap.yaml](workflow-controller-configmap.yaml):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: workflow-controller-configmap
+data:
+ config: |
+ synchronization:
+ enableAPI: true
+ # Database configuration is also required - see synchronization.md
+```
+
+!!! Warning
+ Setting `enableAPI: true` only enables the API endpoints.
+ You must also configure the database connection settings as described in the [synchronization documentation](synchronization.md#database-configuration).
+!!! Warning
+ Deleting a semaphore that is currently in use is allowed.
+ Workflows attempting to take it after deletion will error.
+
+## Permissions
+
+### ConfigMap Limits
+
+To manage ConfigMap-based limits, users need Kubernetes RBAC permissions to create, read, update, or delete ConfigMaps in the target namespace.
+The API server enforces these permissions through standard Kubernetes RBAC.
+
+### Database Limits
+
+Database limits are not backed by Kubernetes resources, so Kubernetes RBAC cannot directly control access to them.
+Instead, the Argo Server uses `workflow` permissions as a proxy:
+
+- To **create** a database limit: requires permission to create workflows in the namespace
+- To **get** a database limit: requires permission to get workflows in the namespace
+- To **update** a database limit: requires permission to update workflows in the namespace
+- To **delete** a database limit: requires permission to delete workflows in the namespace
+For example, a user with this RBAC policy:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: workflow-operator
+rules:
+ - apiGroups: ["argoproj.io"]
+ resources: ["workflows"]
+ verbs: ["create", "get", "update", "delete"]
+```
+
+can perform all operations on database semaphores in that namespace.
diff --git a/docs/workflow-controller-configmap.md b/docs/workflow-controller-configmap.md
index a58cf5010385..a4b74fa371a2 100644
--- a/docs/workflow-controller-configmap.md
+++ b/docs/workflow-controller-configmap.md
@@ -319,6 +319,7 @@ SyncConfig contains synchronization configuration for database locks (semaphores
| `PostgreSQL` | [`PostgreSQLConfig`](#postgresqlconfig) | PostgreSQL configuration for PostgreSQL database, don't use MySQL at the same time |
| `MySQL` | [`MySQLConfig`](#mysqlconfig) | MySQL configuration for MySQL database, don't use PostgreSQL at the same time |
| `ConnectionPool` | [`ConnectionPool`](#connectionpool) | Pooled connection settings for all types of database connections |
+| `EnableAPI` | `bool` | EnableAPI enables the database synchronization API |
| `ControllerName` | `string` | ControllerName sets a unique name for this controller instance |
| `SkipMigration` | `bool` | SkipMigration skips database migration if needed |
| `LimitTableName` | `string` | LimitTableName customizes the table name for semaphore limits, if not set, the default value is "sync_limit" |
diff --git a/docs/workflow-controller-configmap.yaml b/docs/workflow-controller-configmap.yaml
index 62eb65f303d7..c7af6c9b1c89 100644
--- a/docs/workflow-controller-configmap.yaml
+++ b/docs/workflow-controller-configmap.yaml
@@ -315,6 +315,9 @@ data:
# This enables coordination between multiple argo controller instances or across clusters
# Shares a similar structure with persistence configuration
synchronization: |
+ # Enable or disable database synchronization API (default: false)
+ enableAPI: false
+
# Connection pool settings, similar to persistence connectionPool
connectionPool:
maxIdleConns: 100
diff --git a/manifests/components/mysql/overlays/workflow-controller-configmap.yaml b/manifests/components/mysql/overlays/workflow-controller-configmap.yaml
index c83299074dc4..83b9ef02f388 100644
--- a/manifests/components/mysql/overlays/workflow-controller-configmap.yaml
+++ b/manifests/components/mysql/overlays/workflow-controller-configmap.yaml
@@ -23,6 +23,7 @@ data:
name: argo-mysql-config
key: password
synchronization: |
+ enableAPI: true
connectionPool:
maxIdleConns: 100
maxOpenConns: 0
diff --git a/manifests/components/postgres/overlays/workflow-controller-configmap.yaml b/manifests/components/postgres/overlays/workflow-controller-configmap.yaml
index b6cfa1869ef9..a903daff5a7e 100644
--- a/manifests/components/postgres/overlays/workflow-controller-configmap.yaml
+++ b/manifests/components/postgres/overlays/workflow-controller-configmap.yaml
@@ -23,6 +23,7 @@ data:
name: argo-postgres-config
key: password
synchronization: |
+ enableAPI: true
connectionPool:
maxIdleConns: 100
maxOpenConns: 0
diff --git a/manifests/quick-start-mysql.yaml b/manifests/quick-start-mysql.yaml
index 1eec707c19ae..d7c247288e6d 100644
--- a/manifests/quick-start-mysql.yaml
+++ b/manifests/quick-start-mysql.yaml
@@ -5489,6 +5489,7 @@ data:
failed: 3
errored: 3
synchronization: |
+ enableAPI: true
connectionPool:
maxIdleConns: 100
maxOpenConns: 0
diff --git a/manifests/quick-start-postgres.yaml b/manifests/quick-start-postgres.yaml
index 1d737b873e42..1a86a5b22bba 100644
--- a/manifests/quick-start-postgres.yaml
+++ b/manifests/quick-start-postgres.yaml
@@ -5489,6 +5489,7 @@ data:
failed: 3
errored: 3
synchronization: |
+ enableAPI: true
connectionPool:
maxIdleConns: 100
maxOpenConns: 0
diff --git a/mkdocs.yml b/mkdocs.yml
index 450baeba6bbf..613dd05905bd 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -229,11 +229,10 @@ nav:
- argo submit: cli/argo_submit.md
- argo suspend: cli/argo_suspend.md
- argo sync: cli/argo_sync.md
- - argo sync configmap: cli/argo_sync_configmap.md
- - argo sync configmap create: cli/argo_sync_configmap_create.md
- - argo sync configmap delete: cli/argo_sync_configmap_delete.md
- - argo sync configmap get: cli/argo_sync_configmap_get.md
- - argo sync configmap update: cli/argo_sync_configmap_update.md
+ - argo sync create: cli/argo_sync_create.md
+ - argo sync delete: cli/argo_sync_delete.md
+ - argo sync get: cli/argo_sync_get.md
+ - argo sync update: cli/argo_sync_update.md
- argo template: cli/argo_template.md
- argo template create: cli/argo_template_create.md
- argo template delete: cli/argo_template_delete.md
@@ -267,6 +266,7 @@ nav:
- workflow-restrictions.md
- sidecar-injection.md
- service-account-secrets.md
+ - synchronization-config.md
- parallelism.md
- Argo Server:
- argo-server.md
diff --git a/pkg/apiclient/apiclient.go b/pkg/apiclient/apiclient.go
index 1092992bbea7..2a9f4750ab93 100644
--- a/pkg/apiclient/apiclient.go
+++ b/pkg/apiclient/apiclient.go
@@ -24,7 +24,7 @@ type Client interface {
NewWorkflowTemplateServiceClient() (workflowtemplatepkg.WorkflowTemplateServiceClient, error)
NewClusterWorkflowTemplateServiceClient() (clusterworkflowtmplpkg.ClusterWorkflowTemplateServiceClient, error)
NewInfoServiceClient() (infopkg.InfoServiceClient, error)
- NewSyncServiceClient() (syncpkg.SyncServiceClient, error)
+ NewSyncServiceClient(ctx context.Context) (syncpkg.SyncServiceClient, error)
}
type Opts struct {
diff --git a/pkg/apiclient/argo-kube-client.go b/pkg/apiclient/argo-kube-client.go
index 4b8df41d73c2..65ce9835c618 100644
--- a/pkg/apiclient/argo-kube-client.go
+++ b/pkg/apiclient/argo-kube-client.go
@@ -204,6 +204,6 @@ func (a *argoKubeClient) NewClusterWorkflowTemplateServiceClient() (clusterworkf
return &errorTranslatingWorkflowClusterTemplateServiceClient{&argoKubeWorkflowClusterTemplateServiceClient{clusterworkflowtmplserver.NewClusterWorkflowTemplateServer(a.instanceIDService, a.cwfTmplStore, nil)}}, nil
}
-func (a *argoKubeClient) NewSyncServiceClient() (syncpkg.SyncServiceClient, error) {
- return &errorTranslatingArgoKubeSyncServiceClient{&argoKubeSyncServiceClient{syncserver.NewSyncServer()}}, nil
+func (a *argoKubeClient) NewSyncServiceClient(ctx context.Context) (syncpkg.SyncServiceClient, error) {
+ return &errorTranslatingArgoKubeSyncServiceClient{&argoKubeSyncServiceClient{syncserver.NewSyncServer(ctx, nil, "", nil)}}, nil
}
diff --git a/pkg/apiclient/argo-server-client.go b/pkg/apiclient/argo-server-client.go
index 4468237f819e..d5b79a135bce 100644
--- a/pkg/apiclient/argo-server-client.go
+++ b/pkg/apiclient/argo-server-client.go
@@ -63,7 +63,7 @@ func (a *argoServerClient) NewInfoServiceClient() (infopkg.InfoServiceClient, er
return infopkg.NewInfoServiceClient(a.ClientConn), nil
}
-func (a *argoServerClient) NewSyncServiceClient() (syncpkg.SyncServiceClient, error) {
+func (a *argoServerClient) NewSyncServiceClient(_ context.Context) (syncpkg.SyncServiceClient, error) {
return syncpkg.NewSyncServiceClient(a.ClientConn), nil
}
diff --git a/pkg/apiclient/http1-client.go b/pkg/apiclient/http1-client.go
index 92943375acca..2c5ffd3cd2a9 100644
--- a/pkg/apiclient/http1-client.go
+++ b/pkg/apiclient/http1-client.go
@@ -42,7 +42,7 @@ func (h httpClient) NewInfoServiceClient() (infopkg.InfoServiceClient, error) {
return http1.InfoServiceClient(h), nil
}
-func (h httpClient) NewSyncServiceClient() (syncpkg.SyncServiceClient, error) {
+func (h httpClient) NewSyncServiceClient(_ context.Context) (syncpkg.SyncServiceClient, error) {
return http1.SyncServiceClient(h), nil
}
diff --git a/pkg/apiclient/offline-client.go b/pkg/apiclient/offline-client.go
index a9552dce29a0..d8de0f9555b5 100644
--- a/pkg/apiclient/offline-client.go
+++ b/pkg/apiclient/offline-client.go
@@ -136,7 +136,7 @@ func (c *offlineClient) NewInfoServiceClient() (infopkg.InfoServiceClient, error
return nil, ErrNoArgoServer
}
-func (c *offlineClient) NewSyncServiceClient() (syncpkg.SyncServiceClient, error) {
+func (c *offlineClient) NewSyncServiceClient(_ context.Context) (syncpkg.SyncServiceClient, error) {
return nil, ErrNoArgoServer
}
diff --git a/pkg/apiclient/sync/sync.pb.go b/pkg/apiclient/sync/sync.pb.go
index e6e74689bc90..d9a68642ac24 100644
--- a/pkg/apiclient/sync/sync.pb.go
+++ b/pkg/apiclient/sync/sync.pb.go
@@ -30,18 +30,18 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type SyncConfigType int32
const (
- SyncConfigType_CONFIG_MAP SyncConfigType = 0
- SyncConfigType_DATABASE SyncConfigType = 1
+ SyncConfigType_CONFIGMAP SyncConfigType = 0
+ SyncConfigType_DATABASE SyncConfigType = 1
)
var SyncConfigType_name = map[int32]string{
- 0: "CONFIG_MAP",
+ 0: "CONFIGMAP",
1: "DATABASE",
}
var SyncConfigType_value = map[string]int32{
- "CONFIG_MAP": 0,
- "DATABASE": 1,
+ "CONFIGMAP": 0,
+ "DATABASE": 1,
}
func (x SyncConfigType) String() string {
@@ -55,9 +55,9 @@ func (SyncConfigType) EnumDescriptor() ([]byte, []int) {
type CreateSyncLimitRequest struct {
Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
- Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ CmName string `protobuf:"bytes,3,opt,name=cmName,proto3" json:"cmName,omitempty"`
Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
- SizeLimit int32 `protobuf:"varint,5,opt,name=sizeLimit,proto3" json:"sizeLimit,omitempty"`
+ Limit int32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -100,7 +100,7 @@ func (m *CreateSyncLimitRequest) GetType() SyncConfigType {
if m != nil {
return m.Type
}
- return SyncConfigType_CONFIG_MAP
+ return SyncConfigType_CONFIGMAP
}
func (m *CreateSyncLimitRequest) GetNamespace() string {
@@ -110,9 +110,9 @@ func (m *CreateSyncLimitRequest) GetNamespace() string {
return ""
}
-func (m *CreateSyncLimitRequest) GetName() string {
+func (m *CreateSyncLimitRequest) GetCmName() string {
if m != nil {
- return m.Name
+ return m.CmName
}
return ""
}
@@ -124,9 +124,9 @@ func (m *CreateSyncLimitRequest) GetKey() string {
return ""
}
-func (m *CreateSyncLimitRequest) GetSizeLimit() int32 {
+func (m *CreateSyncLimitRequest) GetLimit() int32 {
if m != nil {
- return m.SizeLimit
+ return m.Limit
}
return 0
}
@@ -134,9 +134,9 @@ func (m *CreateSyncLimitRequest) GetSizeLimit() int32 {
type SyncLimitResponse struct {
Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
- Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ CmName string `protobuf:"bytes,3,opt,name=cmName,proto3" json:"cmName,omitempty"`
Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
- SizeLimit int32 `protobuf:"varint,5,opt,name=sizeLimit,proto3" json:"sizeLimit,omitempty"`
+ Limit int32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -179,7 +179,7 @@ func (m *SyncLimitResponse) GetType() SyncConfigType {
if m != nil {
return m.Type
}
- return SyncConfigType_CONFIG_MAP
+ return SyncConfigType_CONFIGMAP
}
func (m *SyncLimitResponse) GetNamespace() string {
@@ -189,9 +189,9 @@ func (m *SyncLimitResponse) GetNamespace() string {
return ""
}
-func (m *SyncLimitResponse) GetName() string {
+func (m *SyncLimitResponse) GetCmName() string {
if m != nil {
- return m.Name
+ return m.CmName
}
return ""
}
@@ -203,9 +203,9 @@ func (m *SyncLimitResponse) GetKey() string {
return ""
}
-func (m *SyncLimitResponse) GetSizeLimit() int32 {
+func (m *SyncLimitResponse) GetLimit() int32 {
if m != nil {
- return m.SizeLimit
+ return m.Limit
}
return 0
}
@@ -213,7 +213,7 @@ func (m *SyncLimitResponse) GetSizeLimit() int32 {
type GetSyncLimitRequest struct {
Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
- Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ CmName string `protobuf:"bytes,3,opt,name=cmName,proto3" json:"cmName,omitempty"`
Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -257,7 +257,7 @@ func (m *GetSyncLimitRequest) GetType() SyncConfigType {
if m != nil {
return m.Type
}
- return SyncConfigType_CONFIG_MAP
+ return SyncConfigType_CONFIGMAP
}
func (m *GetSyncLimitRequest) GetNamespace() string {
@@ -267,9 +267,9 @@ func (m *GetSyncLimitRequest) GetNamespace() string {
return ""
}
-func (m *GetSyncLimitRequest) GetName() string {
+func (m *GetSyncLimitRequest) GetCmName() string {
if m != nil {
- return m.Name
+ return m.CmName
}
return ""
}
@@ -284,9 +284,9 @@ func (m *GetSyncLimitRequest) GetKey() string {
type UpdateSyncLimitRequest struct {
Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
- Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ CmName string `protobuf:"bytes,3,opt,name=cmName,proto3" json:"cmName,omitempty"`
Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
- SizeLimit int32 `protobuf:"varint,5,opt,name=sizeLimit,proto3" json:"sizeLimit,omitempty"`
+ Limit int32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -329,7 +329,7 @@ func (m *UpdateSyncLimitRequest) GetType() SyncConfigType {
if m != nil {
return m.Type
}
- return SyncConfigType_CONFIG_MAP
+ return SyncConfigType_CONFIGMAP
}
func (m *UpdateSyncLimitRequest) GetNamespace() string {
@@ -339,9 +339,9 @@ func (m *UpdateSyncLimitRequest) GetNamespace() string {
return ""
}
-func (m *UpdateSyncLimitRequest) GetName() string {
+func (m *UpdateSyncLimitRequest) GetCmName() string {
if m != nil {
- return m.Name
+ return m.CmName
}
return ""
}
@@ -353,9 +353,9 @@ func (m *UpdateSyncLimitRequest) GetKey() string {
return ""
}
-func (m *UpdateSyncLimitRequest) GetSizeLimit() int32 {
+func (m *UpdateSyncLimitRequest) GetLimit() int32 {
if m != nil {
- return m.SizeLimit
+ return m.Limit
}
return 0
}
@@ -363,7 +363,7 @@ func (m *UpdateSyncLimitRequest) GetSizeLimit() int32 {
type DeleteSyncLimitRequest struct {
Type SyncConfigType `protobuf:"varint,1,opt,name=type,proto3,enum=sync.SyncConfigType" json:"type,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
- Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ CmName string `protobuf:"bytes,3,opt,name=cmName,proto3" json:"cmName,omitempty"`
Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -407,7 +407,7 @@ func (m *DeleteSyncLimitRequest) GetType() SyncConfigType {
if m != nil {
return m.Type
}
- return SyncConfigType_CONFIG_MAP
+ return SyncConfigType_CONFIGMAP
}
func (m *DeleteSyncLimitRequest) GetNamespace() string {
@@ -417,9 +417,9 @@ func (m *DeleteSyncLimitRequest) GetNamespace() string {
return ""
}
-func (m *DeleteSyncLimitRequest) GetName() string {
+func (m *DeleteSyncLimitRequest) GetCmName() string {
if m != nil {
- return m.Name
+ return m.CmName
}
return ""
}
@@ -484,36 +484,36 @@ func init() { proto.RegisterFile("pkg/apiclient/sync/sync.proto", fileDescriptor
var fileDescriptor_74ab334b2e266b46 = []byte{
// 483 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xcd, 0x6e, 0xd3, 0x40,
- 0x10, 0x66, 0x9b, 0x14, 0xd1, 0xa1, 0x4a, 0xc2, 0x82, 0x5a, 0x37, 0x4a, 0xa2, 0xc8, 0x95, 0x50,
- 0x88, 0x44, 0x2c, 0x8a, 0xb8, 0x70, 0x4b, 0x53, 0xa8, 0x90, 0xf8, 0x53, 0x52, 0x2e, 0x5c, 0x90,
- 0x6b, 0xa6, 0x66, 0x9b, 0x78, 0x77, 0xf1, 0x6e, 0x53, 0x99, 0xaa, 0x17, 0x90, 0x78, 0x01, 0x1e,
- 0x80, 0x0b, 0x0f, 0xc3, 0x11, 0x09, 0x71, 0x47, 0x11, 0x0f, 0x82, 0x76, 0xad, 0x36, 0x4d, 0x6a,
- 0xab, 0xc7, 0xf6, 0x62, 0x8d, 0x67, 0xc7, 0xf3, 0x7d, 0x9f, 0xe7, 0x9b, 0x85, 0xba, 0x1c, 0x86,
- 0x9e, 0x2f, 0x59, 0x30, 0x62, 0xc8, 0xb5, 0xa7, 0x12, 0x1e, 0xd8, 0x47, 0x47, 0xc6, 0x42, 0x0b,
- 0x5a, 0x34, 0x71, 0xb5, 0x16, 0x0a, 0x11, 0x8e, 0xd0, 0xd4, 0x79, 0x3e, 0xe7, 0x42, 0xfb, 0x9a,
- 0x09, 0xae, 0xd2, 0x1a, 0xf7, 0x07, 0x81, 0x95, 0x5e, 0x8c, 0xbe, 0xc6, 0x41, 0xc2, 0x83, 0xe7,
- 0x2c, 0x62, 0xba, 0x8f, 0x1f, 0x0f, 0x50, 0x69, 0xda, 0x82, 0xa2, 0x4e, 0x24, 0x3a, 0xa4, 0x49,
- 0x5a, 0xa5, 0x8d, 0x3b, 0x1d, 0xdb, 0xd9, 0x54, 0xf5, 0x04, 0xdf, 0x63, 0xe1, 0x4e, 0x22, 0xb1,
- 0x6f, 0x2b, 0x68, 0x0d, 0x96, 0xb8, 0x1f, 0xa1, 0x92, 0x7e, 0x80, 0xce, 0x42, 0x93, 0xb4, 0x96,
- 0xfa, 0xd3, 0x04, 0xa5, 0x50, 0x34, 0x2f, 0x4e, 0xc1, 0x1e, 0xd8, 0x98, 0x56, 0xa0, 0x30, 0xc4,
- 0xc4, 0x29, 0xda, 0x94, 0x09, 0x4d, 0x0f, 0xc5, 0x3e, 0xa1, 0x65, 0xe0, 0x2c, 0x36, 0x49, 0x6b,
- 0xb1, 0x3f, 0x4d, 0xb8, 0xdf, 0x09, 0xdc, 0x3a, 0x43, 0x50, 0x49, 0xc1, 0x15, 0x5e, 0x29, 0x86,
- 0x5f, 0x08, 0xdc, 0xde, 0x46, 0x7d, 0xb9, 0x7f, 0xd1, 0x8e, 0xf3, 0x8d, 0x7c, 0x7f, 0xd5, 0xc7,
- 0xf9, 0x95, 0xc0, 0xca, 0x16, 0x8e, 0xf0, 0xb2, 0x69, 0xba, 0x6b, 0xb0, 0x7a, 0x8e, 0x47, 0x6a,
- 0xae, 0x76, 0x07, 0x4a, 0xb3, 0xb0, 0xb4, 0x04, 0xd0, 0x7b, 0xf5, 0xf2, 0xe9, 0xb3, 0xed, 0x77,
- 0x2f, 0xba, 0xaf, 0x2b, 0xd7, 0xe8, 0x32, 0xdc, 0xd8, 0xea, 0xee, 0x74, 0x37, 0xbb, 0x83, 0x27,
- 0x15, 0xb2, 0xf1, 0xa7, 0x00, 0x37, 0xcd, 0x07, 0x03, 0x8c, 0xc7, 0x2c, 0x40, 0x1a, 0x41, 0x79,
- 0x6e, 0xb1, 0x68, 0x2d, 0x55, 0x93, 0xbd, 0x6f, 0xd5, 0xd5, 0xa9, 0xd6, 0x19, 0x26, 0xee, 0xfa,
- 0xe7, 0xdf, 0xff, 0xbe, 0x2d, 0xd4, 0x5d, 0xc7, 0xee, 0xf0, 0xf8, 0x41, 0xba, 0xe8, 0x47, 0xa7,
- 0x5a, 0x8f, 0x1f, 0x93, 0x36, 0xdd, 0x87, 0xe5, 0xb3, 0xf6, 0xa3, 0x6b, 0x69, 0xb7, 0x0c, 0x4b,
- 0xe6, 0x03, 0xdd, 0xb5, 0x40, 0x4d, 0xda, 0xc8, 0x03, 0xf2, 0x8e, 0x86, 0x98, 0x1c, 0x53, 0x05,
- 0xe5, 0x39, 0x93, 0x9d, 0x48, 0xcb, 0xf6, 0x5e, 0x3e, 0xe2, 0x3d, 0x8b, 0xb8, 0x5e, 0xbd, 0x00,
- 0xd1, 0x08, 0x1c, 0x43, 0x79, 0x6e, 0x54, 0x27, 0xa0, 0xd9, 0x4e, 0xaa, 0xd6, 0x73, 0x4e, 0x67,
- 0xc5, 0xb6, 0x2f, 0x80, 0xde, 0xec, 0xfd, 0x9c, 0x34, 0xc8, 0xaf, 0x49, 0x83, 0xfc, 0x9d, 0x34,
- 0xc8, 0xdb, 0x47, 0x21, 0xd3, 0x1f, 0x0e, 0x76, 0x3b, 0x81, 0x88, 0x3c, 0x3f, 0x0e, 0x85, 0x8c,
- 0xc5, 0xbe, 0x0d, 0xee, 0x1f, 0x8a, 0x78, 0xb8, 0x37, 0x12, 0x87, 0xca, 0x3b, 0x7f, 0x2b, 0xef,
- 0x5e, 0xb7, 0xb7, 0xed, 0xc3, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xce, 0x5f, 0xda, 0xd4, 0xb2,
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0x41, 0x6b, 0x13, 0x41,
+ 0x14, 0x76, 0x9a, 0xa4, 0x98, 0x67, 0x6d, 0xe2, 0x58, 0xd2, 0x6d, 0x48, 0x42, 0xd8, 0x82, 0xc4,
+ 0x40, 0xb3, 0x58, 0xf1, 0xe2, 0x2d, 0x4d, 0xb5, 0x08, 0x5a, 0x25, 0xa9, 0x17, 0x6f, 0xdb, 0xf5,
+ 0x75, 0x9d, 0x66, 0x77, 0x66, 0xdc, 0x99, 0xa6, 0x2c, 0xa5, 0x17, 0x2f, 0x82, 0x57, 0x8f, 0x5e,
+ 0xfc, 0x39, 0x1e, 0x05, 0xf1, 0x2e, 0xc1, 0x1f, 0x22, 0x3b, 0x6b, 0x6d, 0x93, 0x66, 0xc9, 0xd1,
+ 0x5c, 0x96, 0x37, 0x33, 0x1f, 0xf3, 0x7d, 0xdf, 0xbe, 0xef, 0x0d, 0xd4, 0xe5, 0xd0, 0x77, 0x5c,
+ 0xc9, 0xbc, 0x80, 0x21, 0xd7, 0x8e, 0x8a, 0xb9, 0x67, 0x3e, 0x1d, 0x19, 0x09, 0x2d, 0x68, 0x3e,
+ 0xa9, 0xab, 0x35, 0x5f, 0x08, 0x3f, 0xc0, 0x04, 0xe7, 0xb8, 0x9c, 0x0b, 0xed, 0x6a, 0x26, 0xb8,
+ 0x4a, 0x31, 0xf6, 0x57, 0x02, 0x95, 0x5e, 0x84, 0xae, 0xc6, 0x41, 0xcc, 0xbd, 0xe7, 0x2c, 0x64,
+ 0xba, 0x8f, 0xef, 0x4f, 0x50, 0x69, 0xda, 0x82, 0xbc, 0x8e, 0x25, 0x5a, 0xa4, 0x49, 0x5a, 0xab,
+ 0xdb, 0x6b, 0x1d, 0x73, 0x73, 0x82, 0xea, 0x09, 0x7e, 0xc4, 0xfc, 0x83, 0x58, 0x62, 0xdf, 0x20,
+ 0x68, 0x0d, 0x8a, 0xdc, 0x0d, 0x51, 0x49, 0xd7, 0x43, 0x6b, 0xa9, 0x49, 0x5a, 0xc5, 0xfe, 0xe5,
+ 0x06, 0xad, 0xc0, 0xb2, 0x17, 0xee, 0xbb, 0x21, 0x5a, 0x39, 0x73, 0xf4, 0x77, 0x45, 0xcb, 0x90,
+ 0x1b, 0x62, 0x6c, 0xe5, 0xcd, 0x66, 0x52, 0xd2, 0x35, 0x28, 0x04, 0x89, 0x02, 0xab, 0xd0, 0x24,
+ 0xad, 0x42, 0x3f, 0x5d, 0xd8, 0x5f, 0x08, 0xdc, 0xb9, 0x22, 0x4e, 0x49, 0xc1, 0x15, 0x2e, 0x8c,
+ 0xba, 0x8f, 0x04, 0xee, 0xee, 0xa1, 0xfe, 0xff, 0x7f, 0xcf, 0xb4, 0xf2, 0xb5, 0x7c, 0xbb, 0xc8,
+ 0xad, 0xfc, 0x44, 0xa0, 0xb2, 0x8b, 0x01, 0x2e, 0x82, 0x44, 0x7b, 0x03, 0xd6, 0xaf, 0x69, 0x49,
+ 0xc3, 0xd5, 0xde, 0x82, 0xd5, 0x49, 0x6a, 0x7a, 0x1b, 0x8a, 0xbd, 0x97, 0xfb, 0x4f, 0x9f, 0xed,
+ 0xbd, 0xe8, 0xbe, 0x2a, 0xdf, 0xa0, 0x2b, 0x70, 0x73, 0xb7, 0x7b, 0xd0, 0xdd, 0xe9, 0x0e, 0x9e,
+ 0x94, 0xc9, 0xf6, 0xcf, 0x1c, 0xdc, 0x4a, 0xf0, 0x03, 0x8c, 0x46, 0xcc, 0x43, 0x1a, 0x42, 0x69,
+ 0x6a, 0xa6, 0x68, 0x2d, 0x35, 0x34, 0x7b, 0xd4, 0xaa, 0xeb, 0x97, 0x76, 0x27, 0x84, 0xd8, 0x9b,
+ 0x1f, 0x7e, 0xfc, 0xfe, 0xbc, 0x54, 0xb7, 0x2d, 0x33, 0xbe, 0xa3, 0x07, 0xe9, 0x8c, 0x9f, 0xfd,
+ 0xb3, 0x7b, 0xfe, 0x98, 0xb4, 0xe9, 0x31, 0xac, 0x5c, 0x4d, 0x20, 0xdd, 0x48, 0x6f, 0x9b, 0x91,
+ 0xca, 0x6c, 0xa2, 0x7b, 0x86, 0xa8, 0x49, 0x1b, 0x59, 0x44, 0xce, 0xd9, 0x10, 0xe3, 0x73, 0xaa,
+ 0xa0, 0x34, 0x95, 0xb1, 0x0b, 0x6b, 0xb3, 0xa3, 0x97, 0xcd, 0x78, 0xdf, 0x30, 0x6e, 0x56, 0xe7,
+ 0x30, 0x26, 0x06, 0x47, 0x50, 0x9a, 0xea, 0xd4, 0x05, 0xe9, 0xec, 0x30, 0x55, 0xeb, 0x19, 0xa7,
+ 0x93, 0x66, 0xdb, 0x73, 0xa8, 0x77, 0x7a, 0xdf, 0xc6, 0x0d, 0xf2, 0x7d, 0xdc, 0x20, 0xbf, 0xc6,
+ 0x0d, 0xf2, 0xe6, 0x91, 0xcf, 0xf4, 0xbb, 0x93, 0xc3, 0x8e, 0x27, 0x42, 0xc7, 0x8d, 0x7c, 0x21,
+ 0x23, 0x71, 0x6c, 0x8a, 0xad, 0x53, 0x11, 0x0d, 0x8f, 0x02, 0x71, 0xaa, 0x9c, 0xeb, 0x0f, 0xf2,
+ 0xe1, 0xb2, 0x79, 0x68, 0x1f, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x53, 0x03, 0x44, 0xf3, 0xad,
0x05, 0x00, 0x00,
}
@@ -729,8 +729,8 @@ func (m *CreateSyncLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.SizeLimit != 0 {
- i = encodeVarintSync(dAtA, i, uint64(m.SizeLimit))
+ if m.Limit != 0 {
+ i = encodeVarintSync(dAtA, i, uint64(m.Limit))
i--
dAtA[i] = 0x28
}
@@ -741,10 +741,10 @@ func (m *CreateSyncLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)
i--
dAtA[i] = 0x22
}
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintSync(dAtA, i, uint64(len(m.Name)))
+ if len(m.CmName) > 0 {
+ i -= len(m.CmName)
+ copy(dAtA[i:], m.CmName)
+ i = encodeVarintSync(dAtA, i, uint64(len(m.CmName)))
i--
dAtA[i] = 0x1a
}
@@ -787,8 +787,8 @@ func (m *SyncLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.SizeLimit != 0 {
- i = encodeVarintSync(dAtA, i, uint64(m.SizeLimit))
+ if m.Limit != 0 {
+ i = encodeVarintSync(dAtA, i, uint64(m.Limit))
i--
dAtA[i] = 0x28
}
@@ -799,10 +799,10 @@ func (m *SyncLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x22
}
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintSync(dAtA, i, uint64(len(m.Name)))
+ if len(m.CmName) > 0 {
+ i -= len(m.CmName)
+ copy(dAtA[i:], m.CmName)
+ i = encodeVarintSync(dAtA, i, uint64(len(m.CmName)))
i--
dAtA[i] = 0x1a
}
@@ -852,10 +852,10 @@ func (m *GetSyncLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x22
}
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintSync(dAtA, i, uint64(len(m.Name)))
+ if len(m.CmName) > 0 {
+ i -= len(m.CmName)
+ copy(dAtA[i:], m.CmName)
+ i = encodeVarintSync(dAtA, i, uint64(len(m.CmName)))
i--
dAtA[i] = 0x1a
}
@@ -898,8 +898,8 @@ func (m *UpdateSyncLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if m.SizeLimit != 0 {
- i = encodeVarintSync(dAtA, i, uint64(m.SizeLimit))
+ if m.Limit != 0 {
+ i = encodeVarintSync(dAtA, i, uint64(m.Limit))
i--
dAtA[i] = 0x28
}
@@ -910,10 +910,10 @@ func (m *UpdateSyncLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)
i--
dAtA[i] = 0x22
}
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintSync(dAtA, i, uint64(len(m.Name)))
+ if len(m.CmName) > 0 {
+ i -= len(m.CmName)
+ copy(dAtA[i:], m.CmName)
+ i = encodeVarintSync(dAtA, i, uint64(len(m.CmName)))
i--
dAtA[i] = 0x1a
}
@@ -963,10 +963,10 @@ func (m *DeleteSyncLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)
i--
dAtA[i] = 0x22
}
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintSync(dAtA, i, uint64(len(m.Name)))
+ if len(m.CmName) > 0 {
+ i -= len(m.CmName)
+ copy(dAtA[i:], m.CmName)
+ i = encodeVarintSync(dAtA, i, uint64(len(m.CmName)))
i--
dAtA[i] = 0x1a
}
@@ -1036,7 +1036,7 @@ func (m *CreateSyncLimitRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
- l = len(m.Name)
+ l = len(m.CmName)
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
@@ -1044,8 +1044,8 @@ func (m *CreateSyncLimitRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
- if m.SizeLimit != 0 {
- n += 1 + sovSync(uint64(m.SizeLimit))
+ if m.Limit != 0 {
+ n += 1 + sovSync(uint64(m.Limit))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
@@ -1066,7 +1066,7 @@ func (m *SyncLimitResponse) Size() (n int) {
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
- l = len(m.Name)
+ l = len(m.CmName)
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
@@ -1074,8 +1074,8 @@ func (m *SyncLimitResponse) Size() (n int) {
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
- if m.SizeLimit != 0 {
- n += 1 + sovSync(uint64(m.SizeLimit))
+ if m.Limit != 0 {
+ n += 1 + sovSync(uint64(m.Limit))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
@@ -1096,7 +1096,7 @@ func (m *GetSyncLimitRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
- l = len(m.Name)
+ l = len(m.CmName)
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
@@ -1123,7 +1123,7 @@ func (m *UpdateSyncLimitRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
- l = len(m.Name)
+ l = len(m.CmName)
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
@@ -1131,8 +1131,8 @@ func (m *UpdateSyncLimitRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
- if m.SizeLimit != 0 {
- n += 1 + sovSync(uint64(m.SizeLimit))
+ if m.Limit != 0 {
+ n += 1 + sovSync(uint64(m.Limit))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
@@ -1153,7 +1153,7 @@ func (m *DeleteSyncLimitRequest) Size() (n int) {
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
- l = len(m.Name)
+ l = len(m.CmName)
if l > 0 {
n += 1 + l + sovSync(uint64(l))
}
@@ -1267,7 +1267,7 @@ func (m *CreateSyncLimitRequest) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CmName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -1295,7 +1295,7 @@ func (m *CreateSyncLimitRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ m.CmName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
@@ -1331,9 +1331,9 @@ func (m *CreateSyncLimitRequest) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 5:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
}
- m.SizeLimit = 0
+ m.Limit = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSync
@@ -1343,7 +1343,7 @@ func (m *CreateSyncLimitRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.SizeLimit |= int32(b&0x7F) << shift
+ m.Limit |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1452,7 +1452,7 @@ func (m *SyncLimitResponse) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CmName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -1480,7 +1480,7 @@ func (m *SyncLimitResponse) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ m.CmName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
@@ -1516,9 +1516,9 @@ func (m *SyncLimitResponse) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 5:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
}
- m.SizeLimit = 0
+ m.Limit = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSync
@@ -1528,7 +1528,7 @@ func (m *SyncLimitResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.SizeLimit |= int32(b&0x7F) << shift
+ m.Limit |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1637,7 +1637,7 @@ func (m *GetSyncLimitRequest) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CmName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -1665,7 +1665,7 @@ func (m *GetSyncLimitRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ m.CmName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
@@ -1803,7 +1803,7 @@ func (m *UpdateSyncLimitRequest) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CmName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -1831,7 +1831,7 @@ func (m *UpdateSyncLimitRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ m.CmName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
@@ -1867,9 +1867,9 @@ func (m *UpdateSyncLimitRequest) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 5:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
}
- m.SizeLimit = 0
+ m.Limit = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSync
@@ -1879,7 +1879,7 @@ func (m *UpdateSyncLimitRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.SizeLimit |= int32(b&0x7F) << shift
+ m.Limit |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1988,7 +1988,7 @@ func (m *DeleteSyncLimitRequest) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CmName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -2016,7 +2016,7 @@ func (m *DeleteSyncLimitRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ m.CmName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
diff --git a/pkg/apiclient/sync/sync.proto b/pkg/apiclient/sync/sync.proto
index 8649fcd1b610..c17e442f1fc5 100644
--- a/pkg/apiclient/sync/sync.proto
+++ b/pkg/apiclient/sync/sync.proto
@@ -6,45 +6,45 @@ import "google/api/annotations.proto";
package sync;
enum SyncConfigType {
- CONFIG_MAP = 0;
+ CONFIGMAP = 0;
DATABASE = 1;
}
message CreateSyncLimitRequest {
SyncConfigType type = 1;
string namespace = 2;
- string name = 3;
+ string cmName = 3;
string key = 4;
- int32 sizeLimit = 5;
+ int32 limit = 5;
}
message SyncLimitResponse {
SyncConfigType type = 1;
string namespace = 2;
- string name = 3;
+ string cmName = 3;
string key = 4;
- int32 sizeLimit = 5;
+ int32 limit = 5;
}
message GetSyncLimitRequest {
SyncConfigType type = 1;
string namespace = 2;
- string name = 3;
+ string cmName = 3;
string key = 4;
}
message UpdateSyncLimitRequest {
SyncConfigType type = 1;
string namespace = 2;
- string name = 3;
+ string cmName = 3;
string key = 4;
- int32 sizeLimit = 5;
+ int32 limit = 5;
}
message DeleteSyncLimitRequest {
SyncConfigType type = 1;
string namespace = 2;
- string name = 3;
+ string cmName = 3;
string key = 4;
}
diff --git a/sdks/java/client/docs/SyncCreateSyncLimitRequest.md b/sdks/java/client/docs/SyncCreateSyncLimitRequest.md
index 844d242dda90..8f7b70533ada 100644
--- a/sdks/java/client/docs/SyncCreateSyncLimitRequest.md
+++ b/sdks/java/client/docs/SyncCreateSyncLimitRequest.md
@@ -7,10 +7,10 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
+**cmName** | **String** | | [optional]
**key** | **String** | | [optional]
-**name** | **String** | | [optional]
+**limit** | **Integer** | | [optional]
**namespace** | **String** | | [optional]
-**sizeLimit** | **Integer** | | [optional]
**type** | **SyncSyncConfigType** | | [optional]
diff --git a/sdks/java/client/docs/SyncServiceApi.md b/sdks/java/client/docs/SyncServiceApi.md
index 1617301dc385..2328a7f924cb 100644
--- a/sdks/java/client/docs/SyncServiceApi.md
+++ b/sdks/java/client/docs/SyncServiceApi.md
@@ -82,7 +82,7 @@ Name | Type | Description | Notes
# **syncServiceDeleteSyncLimit**
-> Object syncServiceDeleteSyncLimit(namespace, key, type, name)
+> Object syncServiceDeleteSyncLimit(namespace, key, type, cmName)
@@ -110,10 +110,10 @@ public class Example {
SyncServiceApi apiInstance = new SyncServiceApi(defaultClient);
String namespace = "namespace_example"; // String |
String key = "key_example"; // String |
- String type = "CONFIG_MAP"; // String |
- String name = "name_example"; // String |
+ String type = "CONFIGMAP"; // String |
+ String cmName = "cmName_example"; // String |
try {
- Object result = apiInstance.syncServiceDeleteSyncLimit(namespace, key, type, name);
+ Object result = apiInstance.syncServiceDeleteSyncLimit(namespace, key, type, cmName);
System.out.println(result);
} catch (ApiException e) {
System.err.println("Exception when calling SyncServiceApi#syncServiceDeleteSyncLimit");
@@ -132,8 +132,8 @@ Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**namespace** | **String**| |
**key** | **String**| |
- **type** | **String**| | [optional] [default to CONFIG_MAP] [enum: CONFIG_MAP, DATABASE]
- **name** | **String**| | [optional]
+ **type** | **String**| | [optional] [default to CONFIGMAP] [enum: CONFIGMAP, DATABASE]
+ **cmName** | **String**| | [optional]
### Return type
@@ -156,7 +156,7 @@ Name | Type | Description | Notes
# **syncServiceGetSyncLimit**
-> SyncSyncLimitResponse syncServiceGetSyncLimit(namespace, key, type, name)
+> SyncSyncLimitResponse syncServiceGetSyncLimit(namespace, key, type, cmName)
@@ -184,10 +184,10 @@ public class Example {
SyncServiceApi apiInstance = new SyncServiceApi(defaultClient);
String namespace = "namespace_example"; // String |
String key = "key_example"; // String |
- String type = "CONFIG_MAP"; // String |
- String name = "name_example"; // String |
+ String type = "CONFIGMAP"; // String |
+ String cmName = "cmName_example"; // String |
try {
- SyncSyncLimitResponse result = apiInstance.syncServiceGetSyncLimit(namespace, key, type, name);
+ SyncSyncLimitResponse result = apiInstance.syncServiceGetSyncLimit(namespace, key, type, cmName);
System.out.println(result);
} catch (ApiException e) {
System.err.println("Exception when calling SyncServiceApi#syncServiceGetSyncLimit");
@@ -206,8 +206,8 @@ Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**namespace** | **String**| |
**key** | **String**| |
- **type** | **String**| | [optional] [default to CONFIG_MAP] [enum: CONFIG_MAP, DATABASE]
- **name** | **String**| | [optional]
+ **type** | **String**| | [optional] [default to CONFIGMAP] [enum: CONFIGMAP, DATABASE]
+ **cmName** | **String**| | [optional]
### Return type
diff --git a/sdks/java/client/docs/SyncSyncConfigType.md b/sdks/java/client/docs/SyncSyncConfigType.md
index bf7836cac874..e0bbee061fb7 100644
--- a/sdks/java/client/docs/SyncSyncConfigType.md
+++ b/sdks/java/client/docs/SyncSyncConfigType.md
@@ -5,7 +5,7 @@
## Enum
-* `CONFIG_MAP` (value: `"CONFIG_MAP"`)
+* `CONFIGMAP` (value: `"CONFIGMAP"`)
* `DATABASE` (value: `"DATABASE"`)
diff --git a/sdks/java/client/docs/SyncSyncLimitResponse.md b/sdks/java/client/docs/SyncSyncLimitResponse.md
index 1a2eb35b407b..6511aa251239 100644
--- a/sdks/java/client/docs/SyncSyncLimitResponse.md
+++ b/sdks/java/client/docs/SyncSyncLimitResponse.md
@@ -7,10 +7,10 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
+**cmName** | **String** | | [optional]
**key** | **String** | | [optional]
-**name** | **String** | | [optional]
+**limit** | **Integer** | | [optional]
**namespace** | **String** | | [optional]
-**sizeLimit** | **Integer** | | [optional]
**type** | **SyncSyncConfigType** | | [optional]
diff --git a/sdks/java/client/docs/SyncUpdateSyncLimitRequest.md b/sdks/java/client/docs/SyncUpdateSyncLimitRequest.md
index 1342aa2ba186..80dd10046281 100644
--- a/sdks/java/client/docs/SyncUpdateSyncLimitRequest.md
+++ b/sdks/java/client/docs/SyncUpdateSyncLimitRequest.md
@@ -7,10 +7,10 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
+**cmName** | **String** | | [optional]
**key** | **String** | | [optional]
-**name** | **String** | | [optional]
+**limit** | **Integer** | | [optional]
**namespace** | **String** | | [optional]
-**sizeLimit** | **Integer** | | [optional]
**type** | **SyncSyncConfigType** | | [optional]
diff --git a/sdks/python/client/argo_workflows/api/sync_service_api.py b/sdks/python/client/argo_workflows/api/sync_service_api.py
index 26fd9db14223..8c559d1cdcd4 100644
--- a/sdks/python/client/argo_workflows/api/sync_service_api.py
+++ b/sdks/python/client/argo_workflows/api/sync_service_api.py
@@ -112,7 +112,7 @@ def __init__(self, api_client=None):
'namespace',
'key',
'type',
- 'name',
+ 'cm_name',
],
'required': [
'namespace',
@@ -132,7 +132,7 @@ def __init__(self, api_client=None):
'allowed_values': {
('type',): {
- "CONFIG_MAP": "CONFIG_MAP",
+ "CONFIGMAP": "CONFIGMAP",
"DATABASE": "DATABASE"
},
},
@@ -143,20 +143,20 @@ def __init__(self, api_client=None):
(str,),
'type':
(str,),
- 'name':
+ 'cm_name':
(str,),
},
'attribute_map': {
'namespace': 'namespace',
'key': 'key',
'type': 'type',
- 'name': 'name',
+ 'cm_name': 'cmName',
},
'location_map': {
'namespace': 'path',
'key': 'path',
'type': 'query',
- 'name': 'query',
+ 'cm_name': 'query',
},
'collection_format_map': {
}
@@ -185,7 +185,7 @@ def __init__(self, api_client=None):
'namespace',
'key',
'type',
- 'name',
+ 'cm_name',
],
'required': [
'namespace',
@@ -205,7 +205,7 @@ def __init__(self, api_client=None):
'allowed_values': {
('type',): {
- "CONFIG_MAP": "CONFIG_MAP",
+ "CONFIGMAP": "CONFIGMAP",
"DATABASE": "DATABASE"
},
},
@@ -216,20 +216,20 @@ def __init__(self, api_client=None):
(str,),
'type':
(str,),
- 'name':
+ 'cm_name':
(str,),
},
'attribute_map': {
'namespace': 'namespace',
'key': 'key',
'type': 'type',
- 'name': 'name',
+ 'cm_name': 'cmName',
},
'location_map': {
'namespace': 'path',
'key': 'path',
'type': 'query',
- 'name': 'query',
+ 'cm_name': 'query',
},
'collection_format_map': {
}
@@ -407,8 +407,8 @@ def delete_sync_limit(
key (str):
Keyword Args:
- type (str): [optional] if omitted the server will use the default value of "CONFIG_MAP"
- name (str): [optional]
+ type (str): [optional] if omitted the server will use the default value of "CONFIGMAP"
+ cm_name (str): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
@@ -490,8 +490,8 @@ def get_sync_limit(
key (str):
Keyword Args:
- type (str): [optional] if omitted the server will use the default value of "CONFIG_MAP"
- name (str): [optional]
+ type (str): [optional] if omitted the server will use the default value of "CONFIGMAP"
+ cm_name (str): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
diff --git a/sdks/python/client/argo_workflows/model/sync_create_sync_limit_request.py b/sdks/python/client/argo_workflows/model/sync_create_sync_limit_request.py
index 954bec8ff7cf..145a4df137ea 100644
--- a/sdks/python/client/argo_workflows/model/sync_create_sync_limit_request.py
+++ b/sdks/python/client/argo_workflows/model/sync_create_sync_limit_request.py
@@ -87,10 +87,10 @@ def openapi_types():
"""
lazy_import()
return {
+ 'cm_name': (str,), # noqa: E501
'key': (str,), # noqa: E501
- 'name': (str,), # noqa: E501
+ 'limit': (int,), # noqa: E501
'namespace': (str,), # noqa: E501
- 'size_limit': (int,), # noqa: E501
'type': (SyncSyncConfigType,), # noqa: E501
}
@@ -100,10 +100,10 @@ def discriminator():
attribute_map = {
+ 'cm_name': 'cmName', # noqa: E501
'key': 'key', # noqa: E501
- 'name': 'name', # noqa: E501
+ 'limit': 'limit', # noqa: E501
'namespace': 'namespace', # noqa: E501
- 'size_limit': 'sizeLimit', # noqa: E501
'type': 'type', # noqa: E501
}
@@ -148,10 +148,10 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
+ cm_name (str): [optional] # noqa: E501
key (str): [optional] # noqa: E501
- name (str): [optional] # noqa: E501
+ limit (int): [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
- size_limit (int): [optional] # noqa: E501
type (SyncSyncConfigType): [optional] # noqa: E501
"""
@@ -234,10 +234,10 @@ def __init__(self, *args, **kwargs): # noqa: E501
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
+ cm_name (str): [optional] # noqa: E501
key (str): [optional] # noqa: E501
- name (str): [optional] # noqa: E501
+ limit (int): [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
- size_limit (int): [optional] # noqa: E501
type (SyncSyncConfigType): [optional] # noqa: E501
"""
diff --git a/sdks/python/client/argo_workflows/model/sync_sync_config_type.py b/sdks/python/client/argo_workflows/model/sync_sync_config_type.py
index 1075db334433..7fc396c52d74 100644
--- a/sdks/python/client/argo_workflows/model/sync_sync_config_type.py
+++ b/sdks/python/client/argo_workflows/model/sync_sync_config_type.py
@@ -52,7 +52,7 @@ class SyncSyncConfigType(ModelSimple):
allowed_values = {
('value',): {
- 'CONFIG_MAP': "CONFIG_MAP",
+ 'CONFIGMAP': "CONFIGMAP",
'DATABASE': "DATABASE",
},
}
@@ -105,10 +105,10 @@ def __init__(self, *args, **kwargs):
Note that value can be passed either in args or in kwargs, but not in both.
Args:
- args[0] (str): if omitted defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ] # noqa: E501
+ args[0] (str): if omitted defaults to "CONFIGMAP", must be one of ["CONFIGMAP", "DATABASE", ] # noqa: E501
Keyword Args:
- value (str): if omitted defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ] # noqa: E501
+ value (str): if omitted defaults to "CONFIGMAP", must be one of ["CONFIGMAP", "DATABASE", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
@@ -149,7 +149,7 @@ def __init__(self, *args, **kwargs):
args = list(args)
value = args.pop(0)
else:
- value = "CONFIG_MAP"
+ value = "CONFIGMAP"
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
@@ -191,10 +191,10 @@ def _from_openapi_data(cls, *args, **kwargs):
Note that value can be passed either in args or in kwargs, but not in both.
Args:
- args[0] (str): if omitted defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ] # noqa: E501
+ args[0] (str): if omitted defaults to "CONFIGMAP", must be one of ["CONFIGMAP", "DATABASE", ] # noqa: E501
Keyword Args:
- value (str): if omitted defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ] # noqa: E501
+ value (str): if omitted defaults to "CONFIGMAP", must be one of ["CONFIGMAP", "DATABASE", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
@@ -237,7 +237,7 @@ def _from_openapi_data(cls, *args, **kwargs):
args = list(args)
value = args.pop(0)
else:
- value = "CONFIG_MAP"
+ value = "CONFIGMAP"
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
diff --git a/sdks/python/client/argo_workflows/model/sync_sync_limit_response.py b/sdks/python/client/argo_workflows/model/sync_sync_limit_response.py
index 14f44df836f1..e1051b40fecb 100644
--- a/sdks/python/client/argo_workflows/model/sync_sync_limit_response.py
+++ b/sdks/python/client/argo_workflows/model/sync_sync_limit_response.py
@@ -87,10 +87,10 @@ def openapi_types():
"""
lazy_import()
return {
+ 'cm_name': (str,), # noqa: E501
'key': (str,), # noqa: E501
- 'name': (str,), # noqa: E501
+ 'limit': (int,), # noqa: E501
'namespace': (str,), # noqa: E501
- 'size_limit': (int,), # noqa: E501
'type': (SyncSyncConfigType,), # noqa: E501
}
@@ -100,10 +100,10 @@ def discriminator():
attribute_map = {
+ 'cm_name': 'cmName', # noqa: E501
'key': 'key', # noqa: E501
- 'name': 'name', # noqa: E501
+ 'limit': 'limit', # noqa: E501
'namespace': 'namespace', # noqa: E501
- 'size_limit': 'sizeLimit', # noqa: E501
'type': 'type', # noqa: E501
}
@@ -148,10 +148,10 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
+ cm_name (str): [optional] # noqa: E501
key (str): [optional] # noqa: E501
- name (str): [optional] # noqa: E501
+ limit (int): [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
- size_limit (int): [optional] # noqa: E501
type (SyncSyncConfigType): [optional] # noqa: E501
"""
@@ -234,10 +234,10 @@ def __init__(self, *args, **kwargs): # noqa: E501
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
+ cm_name (str): [optional] # noqa: E501
key (str): [optional] # noqa: E501
- name (str): [optional] # noqa: E501
+ limit (int): [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
- size_limit (int): [optional] # noqa: E501
type (SyncSyncConfigType): [optional] # noqa: E501
"""
diff --git a/sdks/python/client/argo_workflows/model/sync_update_sync_limit_request.py b/sdks/python/client/argo_workflows/model/sync_update_sync_limit_request.py
index 68f4a91154bb..29dbad573d08 100644
--- a/sdks/python/client/argo_workflows/model/sync_update_sync_limit_request.py
+++ b/sdks/python/client/argo_workflows/model/sync_update_sync_limit_request.py
@@ -87,10 +87,10 @@ def openapi_types():
"""
lazy_import()
return {
+ 'cm_name': (str,), # noqa: E501
'key': (str,), # noqa: E501
- 'name': (str,), # noqa: E501
+ 'limit': (int,), # noqa: E501
'namespace': (str,), # noqa: E501
- 'size_limit': (int,), # noqa: E501
'type': (SyncSyncConfigType,), # noqa: E501
}
@@ -100,10 +100,10 @@ def discriminator():
attribute_map = {
+ 'cm_name': 'cmName', # noqa: E501
'key': 'key', # noqa: E501
- 'name': 'name', # noqa: E501
+ 'limit': 'limit', # noqa: E501
'namespace': 'namespace', # noqa: E501
- 'size_limit': 'sizeLimit', # noqa: E501
'type': 'type', # noqa: E501
}
@@ -148,10 +148,10 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
+ cm_name (str): [optional] # noqa: E501
key (str): [optional] # noqa: E501
- name (str): [optional] # noqa: E501
+ limit (int): [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
- size_limit (int): [optional] # noqa: E501
type (SyncSyncConfigType): [optional] # noqa: E501
"""
@@ -234,10 +234,10 @@ def __init__(self, *args, **kwargs): # noqa: E501
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
+ cm_name (str): [optional] # noqa: E501
key (str): [optional] # noqa: E501
- name (str): [optional] # noqa: E501
+ limit (int): [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
- size_limit (int): [optional] # noqa: E501
type (SyncSyncConfigType): [optional] # noqa: E501
"""
diff --git a/sdks/python/client/docs/SyncCreateSyncLimitRequest.md b/sdks/python/client/docs/SyncCreateSyncLimitRequest.md
index 90e31d02cde2..11e2e3a4c035 100644
--- a/sdks/python/client/docs/SyncCreateSyncLimitRequest.md
+++ b/sdks/python/client/docs/SyncCreateSyncLimitRequest.md
@@ -4,10 +4,10 @@
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
+**cm_name** | **str** | | [optional]
**key** | **str** | | [optional]
-**name** | **str** | | [optional]
+**limit** | **int** | | [optional]
**namespace** | **str** | | [optional]
-**size_limit** | **int** | | [optional]
**type** | [**SyncSyncConfigType**](SyncSyncConfigType.md) | | [optional]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
diff --git a/sdks/python/client/docs/SyncServiceApi.md b/sdks/python/client/docs/SyncServiceApi.md
index 98afa8ced0d2..0ead77e1c044 100644
--- a/sdks/python/client/docs/SyncServiceApi.md
+++ b/sdks/python/client/docs/SyncServiceApi.md
@@ -50,11 +50,11 @@ with argo_workflows.ApiClient(configuration) as api_client:
api_instance = sync_service_api.SyncServiceApi(api_client)
namespace = "namespace_example" # str |
body = SyncCreateSyncLimitRequest(
+ cm_name="cm_name_example",
key="key_example",
- name="name_example",
+ limit=1,
namespace="namespace_example",
- size_limit=1,
- type=SyncSyncConfigType("CONFIG_MAP"),
+ type=SyncSyncConfigType("CONFIGMAP"),
) # SyncCreateSyncLimitRequest |
# example passing only required values which don't have defaults set
@@ -134,8 +134,8 @@ with argo_workflows.ApiClient(configuration) as api_client:
api_instance = sync_service_api.SyncServiceApi(api_client)
namespace = "namespace_example" # str |
key = "key_example" # str |
- type = "CONFIG_MAP" # str | (optional) if omitted the server will use the default value of "CONFIG_MAP"
- name = "name_example" # str | (optional)
+ type = "CONFIGMAP" # str | (optional) if omitted the server will use the default value of "CONFIGMAP"
+ cm_name = "cmName_example" # str | (optional)
# example passing only required values which don't have defaults set
try:
@@ -147,7 +147,7 @@ with argo_workflows.ApiClient(configuration) as api_client:
# example passing only required values which don't have defaults set
# and optional values
try:
- api_response = api_instance.delete_sync_limit(namespace, key, type=type, name=name)
+ api_response = api_instance.delete_sync_limit(namespace, key, type=type, cm_name=cm_name)
pprint(api_response)
except argo_workflows.ApiException as e:
print("Exception when calling SyncServiceApi->delete_sync_limit: %s\n" % e)
@@ -160,8 +160,8 @@ Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**namespace** | **str**| |
**key** | **str**| |
- **type** | **str**| | [optional] if omitted the server will use the default value of "CONFIG_MAP"
- **name** | **str**| | [optional]
+ **type** | **str**| | [optional] if omitted the server will use the default value of "CONFIGMAP"
+ **cm_name** | **str**| | [optional]
### Return type
@@ -225,8 +225,8 @@ with argo_workflows.ApiClient(configuration) as api_client:
api_instance = sync_service_api.SyncServiceApi(api_client)
namespace = "namespace_example" # str |
key = "key_example" # str |
- type = "CONFIG_MAP" # str | (optional) if omitted the server will use the default value of "CONFIG_MAP"
- name = "name_example" # str | (optional)
+ type = "CONFIGMAP" # str | (optional) if omitted the server will use the default value of "CONFIGMAP"
+ cm_name = "cmName_example" # str | (optional)
# example passing only required values which don't have defaults set
try:
@@ -238,7 +238,7 @@ with argo_workflows.ApiClient(configuration) as api_client:
# example passing only required values which don't have defaults set
# and optional values
try:
- api_response = api_instance.get_sync_limit(namespace, key, type=type, name=name)
+ api_response = api_instance.get_sync_limit(namespace, key, type=type, cm_name=cm_name)
pprint(api_response)
except argo_workflows.ApiException as e:
print("Exception when calling SyncServiceApi->get_sync_limit: %s\n" % e)
@@ -251,8 +251,8 @@ Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**namespace** | **str**| |
**key** | **str**| |
- **type** | **str**| | [optional] if omitted the server will use the default value of "CONFIG_MAP"
- **name** | **str**| | [optional]
+ **type** | **str**| | [optional] if omitted the server will use the default value of "CONFIGMAP"
+ **cm_name** | **str**| | [optional]
### Return type
@@ -318,11 +318,11 @@ with argo_workflows.ApiClient(configuration) as api_client:
namespace = "namespace_example" # str |
key = "key_example" # str |
body = SyncUpdateSyncLimitRequest(
+ cm_name="cm_name_example",
key="key_example",
- name="name_example",
+ limit=1,
namespace="namespace_example",
- size_limit=1,
- type=SyncSyncConfigType("CONFIG_MAP"),
+ type=SyncSyncConfigType("CONFIGMAP"),
) # SyncUpdateSyncLimitRequest |
# example passing only required values which don't have defaults set
diff --git a/sdks/python/client/docs/SyncSyncConfigType.md b/sdks/python/client/docs/SyncSyncConfigType.md
index 30753e9982ad..3314e6096562 100644
--- a/sdks/python/client/docs/SyncSyncConfigType.md
+++ b/sdks/python/client/docs/SyncSyncConfigType.md
@@ -4,7 +4,7 @@
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
-**value** | **str** | | defaults to "CONFIG_MAP", must be one of ["CONFIG_MAP", "DATABASE", ]
+**value** | **str** | | defaults to "CONFIGMAP", must be one of ["CONFIGMAP", "DATABASE", ]
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/sdks/python/client/docs/SyncSyncLimitResponse.md b/sdks/python/client/docs/SyncSyncLimitResponse.md
index a489ec922771..294d4f9971f3 100644
--- a/sdks/python/client/docs/SyncSyncLimitResponse.md
+++ b/sdks/python/client/docs/SyncSyncLimitResponse.md
@@ -4,10 +4,10 @@
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
+**cm_name** | **str** | | [optional]
**key** | **str** | | [optional]
-**name** | **str** | | [optional]
+**limit** | **int** | | [optional]
**namespace** | **str** | | [optional]
-**size_limit** | **int** | | [optional]
**type** | [**SyncSyncConfigType**](SyncSyncConfigType.md) | | [optional]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
diff --git a/sdks/python/client/docs/SyncUpdateSyncLimitRequest.md b/sdks/python/client/docs/SyncUpdateSyncLimitRequest.md
index 484738c504bb..6afb7adb6153 100644
--- a/sdks/python/client/docs/SyncUpdateSyncLimitRequest.md
+++ b/sdks/python/client/docs/SyncUpdateSyncLimitRequest.md
@@ -4,10 +4,10 @@
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
+**cm_name** | **str** | | [optional]
**key** | **str** | | [optional]
-**name** | **str** | | [optional]
+**limit** | **int** | | [optional]
**namespace** | **str** | | [optional]
-**size_limit** | **int** | | [optional]
**type** | [**SyncSyncConfigType**](SyncSyncConfigType.md) | | [optional]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
diff --git a/server/apiserver/argoserver.go b/server/apiserver/argoserver.go
index 71c085c91847..a4264f547ed3 100644
--- a/server/apiserver/argoserver.go
+++ b/server/apiserver/argoserver.go
@@ -255,7 +255,8 @@ func (as *argoServer) Run(ctx context.Context, port int, browserOpenFunc func(st
artifactServer := artifacts.NewArtifactServer(as.gatekeeper, hydrator.New(offloadRepo), wfArchive, instanceIDService, artifactRepositories, log)
eventServer := event.NewController(ctx, instanceIDService, eventRecorderManager, as.eventQueueSize, as.eventWorkerCount, as.eventAsyncDispatch)
wfArchiveServer := workflowarchive.NewWorkflowArchiveServer(wfArchive, offloadRepo, config.WorkflowDefaults)
- syncServer := sync.NewSyncServer()
+
+ syncServer := sync.NewSyncServer(ctx, as.clients.Kubernetes, as.namespace, config.Synchronization)
wfStore, err := store.NewSQLiteStore(instanceIDService)
if err != nil {
log.WithFatal().Error(ctx, err.Error())
diff --git a/server/sync/sync_cm.go b/server/sync/sync_cm.go
new file mode 100644
index 000000000000..846f2b38a45b
--- /dev/null
+++ b/server/sync/sync_cm.go
@@ -0,0 +1,158 @@
+package sync
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "google.golang.org/grpc/codes"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
+ "github.com/argoproj/argo-workflows/v3/server/auth"
+ sutils "github.com/argoproj/argo-workflows/v3/server/utils"
+)
+
+type configMapSyncProvider struct{}
+
+var _ SyncConfigProvider = &configMapSyncProvider{}
+
+func (s *configMapSyncProvider) createSyncLimit(ctx context.Context, req *syncpkg.CreateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
+ if req.Limit <= 0 {
+ return nil, sutils.ToStatusError(fmt.Errorf("limit must be greater than zero"), codes.InvalidArgument)
+ }
+
+ kubeClient := auth.GetKubeClient(ctx)
+
+ configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace)
+
+ cm, err := configmapGetter.Get(ctx, req.CmName, metav1.GetOptions{})
+ if err == nil {
+ _, has := cm.Data[req.Key]
+ if has {
+ return nil, sutils.ToStatusError(fmt.Errorf("sync limit cannot be created as it already exists"), codes.AlreadyExists)
+ }
+ return s.handleUpdateSyncLimit(ctx, &syncpkg.UpdateSyncLimitRequest{
+ CmName: req.CmName,
+ Namespace: req.Namespace,
+ Key: req.Key,
+ Limit: req.Limit,
+ Type: syncpkg.SyncConfigType_CONFIGMAP,
+ }, false)
+ }
+
+ cm = &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: req.CmName,
+ Namespace: req.Namespace,
+ },
+ Data: map[string]string{
+ req.Key: fmt.Sprint(req.Limit),
+ },
+ }
+
+ cm, err = configmapGetter.Create(ctx, cm, metav1.CreateOptions{})
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+
+ return &syncpkg.SyncLimitResponse{
+ CmName: cm.Name,
+ Namespace: cm.Namespace,
+ Key: req.Key,
+ Limit: req.Limit,
+ Type: syncpkg.SyncConfigType_CONFIGMAP,
+ }, nil
+}
+
+func (s *configMapSyncProvider) getSyncLimit(ctx context.Context, req *syncpkg.GetSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
+ kubeClient := auth.GetKubeClient(ctx)
+
+ configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace)
+
+ cm, err := configmapGetter.Get(ctx, req.CmName, metav1.GetOptions{})
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+
+ limit, ok := cm.Data[req.Key]
+ if !ok {
+ return nil, sutils.ToStatusError(fmt.Errorf("key %s not found in configmap %s/%s", req.Key, cm.Namespace, cm.Name), codes.NotFound)
+ }
+
+ parsedLimit, err := strconv.Atoi(limit)
+ if err != nil {
+ return nil, sutils.ToStatusError(fmt.Errorf("invalid limit format for key %s in configmap %s/%s", req.Key, cm.Namespace, cm.Name), codes.InvalidArgument)
+ }
+
+ return &syncpkg.SyncLimitResponse{
+ CmName: cm.Name,
+ Namespace: cm.Namespace,
+ Key: req.Key,
+ Limit: int32(parsedLimit),
+ Type: syncpkg.SyncConfigType_CONFIGMAP,
+ }, nil
+}
+
+func (s *configMapSyncProvider) updateSyncLimit(ctx context.Context, req *syncpkg.UpdateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
+ if req.Limit <= 0 {
+ return nil, sutils.ToStatusError(fmt.Errorf("limit must be greater than zero"), codes.InvalidArgument)
+ }
+
+ return s.handleUpdateSyncLimit(ctx, req, true)
+}
+
+func (s *configMapSyncProvider) deleteSyncLimit(ctx context.Context, req *syncpkg.DeleteSyncLimitRequest) (*syncpkg.DeleteSyncLimitResponse, error) {
+ kubeClient := auth.GetKubeClient(ctx)
+
+ configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace)
+
+ cm, err := configmapGetter.Get(ctx, req.CmName, metav1.GetOptions{})
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+
+ delete(cm.Data, req.Key)
+
+ _, err = configmapGetter.Update(ctx, cm, metav1.UpdateOptions{})
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+
+ return &syncpkg.DeleteSyncLimitResponse{}, nil
+}
+
+func (s *configMapSyncProvider) handleUpdateSyncLimit(ctx context.Context, req *syncpkg.UpdateSyncLimitRequest, shouldFieldExist bool) (*syncpkg.SyncLimitResponse, error) {
+ kubeClient := auth.GetKubeClient(ctx)
+
+ configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace)
+
+ cm, err := configmapGetter.Get(ctx, req.CmName, metav1.GetOptions{})
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+
+ if cm.Data == nil {
+ cm.Data = make(map[string]string)
+ }
+
+ if _, ok := cm.Data[req.Key]; shouldFieldExist && !ok {
+ return nil, sutils.ToStatusError(fmt.Errorf("key %s not found in configmap %s/%s - please create it first", req.Key, cm.Namespace, cm.Name), codes.NotFound)
+ }
+
+ cm.Data[req.Key] = strconv.Itoa(int(req.Limit))
+
+ cm, err = configmapGetter.Update(ctx, cm, metav1.UpdateOptions{})
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+
+ return &syncpkg.SyncLimitResponse{
+ CmName: cm.Name,
+ Namespace: cm.Namespace,
+ Key: req.Key,
+ Limit: req.Limit,
+ Type: syncpkg.SyncConfigType_CONFIGMAP,
+ }, nil
+}
diff --git a/server/sync/sync_db.go b/server/sync/sync_db.go
new file mode 100644
index 000000000000..9e2223c7aa5e
--- /dev/null
+++ b/server/sync/sync_db.go
@@ -0,0 +1,111 @@
+package sync
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/upper/db/v4"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
+ "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow"
+ "github.com/argoproj/argo-workflows/v3/server/auth"
+ sutils "github.com/argoproj/argo-workflows/v3/server/utils"
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
+)
+
+type dbSyncProvider struct {
+ db syncdb.SyncQueries
+}
+
+var _ SyncConfigProvider = &dbSyncProvider{}
+
+func (s *dbSyncProvider) createSyncLimit(ctx context.Context, req *syncpkg.CreateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
+	// There is no dedicated permission system for database sync limits, so we approximate one
+	// with a k8s RBAC check; the configmap provider needs no such check because the k8s API enforces RBAC itself.
+ allowed, err := auth.CanI(ctx, "create", workflow.WorkflowPlural, req.Namespace, "")
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+ if !allowed {
+ return nil, status.Error(codes.PermissionDenied, fmt.Sprintf("Permission denied, you are not allowed to create database sync limit in namespace \"%s\".", req.Namespace))
+ }
+
+ name := fmt.Sprintf("%s/%s", req.Namespace, req.Key)
+ _, err = s.db.GetSemaphoreLimit(ctx, name)
+ if err == nil {
+ return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Database sync limit already exists in namespace \"%s\".", req.Namespace))
+ } else if err != db.ErrNoMoreRows {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+
+ err = s.db.CreateSemaphoreLimit(ctx, name, int(req.Limit))
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+
+ return &syncpkg.SyncLimitResponse{Key: req.Key, Namespace: req.Namespace, Limit: req.Limit, Type: syncpkg.SyncConfigType_DATABASE}, nil
+}
+
+func (s *dbSyncProvider) getSyncLimit(ctx context.Context, req *syncpkg.GetSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
+ allowed, err := auth.CanI(ctx, "get", workflow.WorkflowPlural, req.Namespace, "")
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+ if !allowed {
+ return nil, status.Error(codes.PermissionDenied, fmt.Sprintf("Permission denied, you are not allowed to get database sync limit in namespace \"%s\".", req.Namespace))
+ }
+
+ name := fmt.Sprintf("%s/%s", req.Namespace, req.Key)
+ limit, err := s.db.GetSemaphoreLimit(ctx, name)
+ if err != nil {
+ if err == db.ErrNoMoreRows {
+ return nil, status.Error(codes.NotFound, fmt.Sprintf("Database sync limit not found in namespace \"%s\".", req.Namespace))
+ }
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+ return &syncpkg.SyncLimitResponse{Key: req.Key, Namespace: req.Namespace, Limit: int32(limit.SizeLimit), Type: syncpkg.SyncConfigType_DATABASE}, nil
+}
+
+func (s *dbSyncProvider) updateSyncLimit(ctx context.Context, req *syncpkg.UpdateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
+ allowed, err := auth.CanI(ctx, "update", workflow.WorkflowPlural, req.Namespace, "")
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+ if !allowed {
+ return nil, status.Error(codes.PermissionDenied, fmt.Sprintf("Permission denied, you are not allowed to update database sync limit in namespace \"%s\".", req.Namespace))
+ }
+
+ name := fmt.Sprintf("%s/%s", req.Namespace, req.Key)
+ err = s.db.UpdateSemaphoreLimit(ctx, name, int(req.Limit))
+ if err != nil {
+ if err == db.ErrNoMoreRows {
+ return nil, status.Error(codes.NotFound, fmt.Sprintf("Database sync limit not found in namespace \"%s\".", req.Namespace))
+ }
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+ return &syncpkg.SyncLimitResponse{Key: req.Key, Namespace: req.Namespace, Limit: req.Limit, Type: syncpkg.SyncConfigType_DATABASE}, nil
+}
+
+func (s *dbSyncProvider) deleteSyncLimit(ctx context.Context, req *syncpkg.DeleteSyncLimitRequest) (*syncpkg.DeleteSyncLimitResponse, error) {
+ allowed, err := auth.CanI(ctx, "delete", workflow.WorkflowPlural, req.Namespace, "")
+ if err != nil {
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+ if !allowed {
+ return nil, status.Error(codes.PermissionDenied, fmt.Sprintf("Permission denied, you are not allowed to delete database sync limit in namespace \"%s\".", req.Namespace))
+ }
+
+	// We intentionally allow deletion even while the semaphore is in use;
+	// the workflow controller should be able to recover from that state.
+ name := fmt.Sprintf("%s/%s", req.Namespace, req.Key)
+ err = s.db.DeleteSemaphoreLimit(ctx, name)
+ if err != nil {
+ if err == db.ErrNoMoreRows {
+ return nil, status.Error(codes.NotFound, fmt.Sprintf("Database sync limit not found in namespace \"%s\".", req.Namespace))
+ }
+ return nil, sutils.ToStatusError(err, codes.Internal)
+ }
+ return &syncpkg.DeleteSyncLimitResponse{}, nil
+}
diff --git a/server/sync/sync_server.go b/server/sync/sync_server.go
index e74f88ba1f0f..670ea83d4438 100644
--- a/server/sync/sync_server.go
+++ b/server/sync/sync_server.go
@@ -3,155 +3,79 @@ package sync
import (
"context"
"fmt"
- "strconv"
"google.golang.org/grpc/codes"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+
+ "github.com/argoproj/argo-workflows/v3/config"
syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
- "github.com/argoproj/argo-workflows/v3/server/auth"
sutils "github.com/argoproj/argo-workflows/v3/server/utils"
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
)
-type syncServer struct {
+type SyncConfigProvider interface {
+ createSyncLimit(ctx context.Context, req *syncpkg.CreateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error)
+ getSyncLimit(ctx context.Context, req *syncpkg.GetSyncLimitRequest) (*syncpkg.SyncLimitResponse, error)
+ updateSyncLimit(ctx context.Context, req *syncpkg.UpdateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error)
+ deleteSyncLimit(ctx context.Context, req *syncpkg.DeleteSyncLimitRequest) (*syncpkg.DeleteSyncLimitResponse, error)
}
-func NewSyncServer() *syncServer {
- return &syncServer{}
+type syncServer struct {
+ providers map[syncpkg.SyncConfigType]SyncConfigProvider
}
-func (s *syncServer) CreateSyncLimit(ctx context.Context, req *syncpkg.CreateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
- if req.SizeLimit <= 0 {
- return nil, sutils.ToStatusError(fmt.Errorf("size limit must be greater than zero"), codes.InvalidArgument)
+func NewSyncServer(ctx context.Context, kubectlConfig kubernetes.Interface, namespace string, syncConfig *config.SyncConfig) *syncServer {
+ server := &syncServer{
+ providers: make(map[syncpkg.SyncConfigType]SyncConfigProvider),
}
- kubeClient := auth.GetKubeClient(ctx)
-
- configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace)
-
- cm, err := configmapGetter.Get(ctx, req.Name, metav1.GetOptions{})
- if err == nil {
- _, has := cm.Data[req.Key]
- if has {
- return nil, fmt.Errorf("sync limit cannot be created as it already exists")
- }
- return s.updateSyncLimit(ctx, &syncpkg.UpdateSyncLimitRequest{
- Name: req.Name,
- Namespace: req.Namespace,
- Key: req.Key,
- SizeLimit: req.SizeLimit,
- }, false)
- }
+ server.providers[syncpkg.SyncConfigType_CONFIGMAP] = &configMapSyncProvider{}
- cm = &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: req.Name,
- Namespace: req.Namespace,
- },
- Data: map[string]string{
- req.Key: fmt.Sprint(req.SizeLimit),
- },
+ if syncConfig != nil && syncConfig.EnableAPI {
+ session := syncdb.DBSessionFromConfig(ctx, kubectlConfig, namespace, syncConfig)
+ server.providers[syncpkg.SyncConfigType_DATABASE] = &dbSyncProvider{db: syncdb.NewSyncQueries(session, syncdb.DBConfigFromConfig(syncConfig))}
}
- cm, err = configmapGetter.Create(ctx, cm, metav1.CreateOptions{})
- if err != nil {
- return nil, sutils.ToStatusError(err, codes.Internal)
- }
-
- return &syncpkg.SyncLimitResponse{
- Name: cm.Name,
- Namespace: cm.Namespace,
- Key: req.Key,
- SizeLimit: req.SizeLimit,
- }, nil
+ return server
}
-func (s *syncServer) GetSyncLimit(ctx context.Context, req *syncpkg.GetSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
- kubeClient := auth.GetKubeClient(ctx)
-
- configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace)
-
- cm, err := configmapGetter.Get(ctx, req.Name, metav1.GetOptions{})
- if err != nil {
- return nil, sutils.ToStatusError(err, codes.Internal)
+func (s *syncServer) CreateSyncLimit(ctx context.Context, req *syncpkg.CreateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
+ if req.Limit <= 0 {
+ return nil, sutils.ToStatusError(fmt.Errorf("limit must be greater than zero"), codes.InvalidArgument)
}
- sizeLimit, ok := cm.Data[req.Key]
+ provider, ok := s.providers[req.Type]
if !ok {
- return nil, sutils.ToStatusError(fmt.Errorf("key %s not found in configmap %s/%s", req.Key, cm.Namespace, cm.Name), codes.NotFound)
+ return nil, sutils.ToStatusError(fmt.Errorf("unsupported sync config type: %s", req.Type), codes.InvalidArgument)
}
-
- parsedSizeLimit, err := strconv.Atoi(sizeLimit)
- if err != nil {
- return nil, sutils.ToStatusError(fmt.Errorf("invalid size limit format for key %s in configmap %s/%s", req.Key, cm.Namespace, cm.Name), codes.InvalidArgument)
- }
-
- return &syncpkg.SyncLimitResponse{
- Name: cm.Name,
- Namespace: cm.Namespace,
- Key: req.Key,
- SizeLimit: int32(parsedSizeLimit),
- }, nil
+ return provider.createSyncLimit(ctx, req)
}
-func (s *syncServer) UpdateSyncLimit(ctx context.Context, req *syncpkg.UpdateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
- if req.SizeLimit <= 0 {
- return nil, sutils.ToStatusError(fmt.Errorf("size limit must be greater than zero"), codes.InvalidArgument)
+func (s *syncServer) GetSyncLimit(ctx context.Context, req *syncpkg.GetSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
+ provider, ok := s.providers[req.Type]
+ if !ok {
+ return nil, sutils.ToStatusError(fmt.Errorf("unsupported sync config type: %s", req.Type), codes.InvalidArgument)
}
-
- return s.updateSyncLimit(ctx, req, true)
+ return provider.getSyncLimit(ctx, req)
}
-func (s *syncServer) updateSyncLimit(ctx context.Context, req *syncpkg.UpdateSyncLimitRequest, shouldFieldExist bool) (*syncpkg.SyncLimitResponse, error) {
- kubeClient := auth.GetKubeClient(ctx)
-
- configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace)
-
- cm, err := configmapGetter.Get(ctx, req.Name, metav1.GetOptions{})
- if err != nil {
- return nil, sutils.ToStatusError(err, codes.Internal)
- }
-
- if cm.Data == nil {
- cm.Data = make(map[string]string)
- }
-
- if _, ok := cm.Data[req.Key]; shouldFieldExist && !ok {
- return nil, sutils.ToStatusError(fmt.Errorf("key %s not found in configmap %s/%s - please create it first", req.Key, cm.Namespace, cm.Name), codes.NotFound)
+func (s *syncServer) UpdateSyncLimit(ctx context.Context, req *syncpkg.UpdateSyncLimitRequest) (*syncpkg.SyncLimitResponse, error) {
+ if req.Limit <= 0 {
+ return nil, sutils.ToStatusError(fmt.Errorf("limit must be greater than zero"), codes.InvalidArgument)
}
- cm.Data[req.Key] = strconv.Itoa(int(req.SizeLimit))
-
- cm, err = configmapGetter.Update(ctx, cm, metav1.UpdateOptions{})
- if err != nil {
- return nil, sutils.ToStatusError(err, codes.Internal)
+ provider, ok := s.providers[req.Type]
+ if !ok {
+ return nil, sutils.ToStatusError(fmt.Errorf("unsupported sync config type: %s", req.Type), codes.InvalidArgument)
}
-
- return &syncpkg.SyncLimitResponse{
- Name: cm.Name,
- Namespace: cm.Namespace,
- Key: req.Key,
- SizeLimit: req.SizeLimit,
- }, nil
+ return provider.updateSyncLimit(ctx, req)
}
func (s *syncServer) DeleteSyncLimit(ctx context.Context, req *syncpkg.DeleteSyncLimitRequest) (*syncpkg.DeleteSyncLimitResponse, error) {
- kubeClient := auth.GetKubeClient(ctx)
-
- configmapGetter := kubeClient.CoreV1().ConfigMaps(req.Namespace)
-
- cm, err := configmapGetter.Get(ctx, req.Name, metav1.GetOptions{})
- if err != nil {
- return nil, sutils.ToStatusError(err, codes.Internal)
- }
-
- delete(cm.Data, req.Key)
-
- _, err = configmapGetter.Update(ctx, cm, metav1.UpdateOptions{})
- if err != nil {
- return nil, sutils.ToStatusError(err, codes.Internal)
+ provider, ok := s.providers[req.Type]
+ if !ok {
+ return nil, sutils.ToStatusError(fmt.Errorf("unsupported sync config type: %s", req.Type), codes.InvalidArgument)
}
-
- return &syncpkg.DeleteSyncLimitResponse{}, nil
+ return provider.deleteSyncLimit(ctx, req)
}
diff --git a/server/sync/sync_server_test.go b/server/sync/sync_server_cm_test.go
similarity index 78%
rename from server/sync/sync_server_test.go
rename to server/sync/sync_server_cm_test.go
index acb042f1e1b9..91a443a9fce8 100644
--- a/server/sync/sync_server_test.go
+++ b/server/sync/sync_server_cm_test.go
@@ -25,15 +25,15 @@ func withKubeClient(kubeClient *fake.Clientset) context.Context {
}
func Test_syncServer_CreateSyncLimit(t *testing.T) {
- t.Run("SizeLimit <= 0", func(t *testing.T) {
- server := NewSyncServer()
+ t.Run("Limit <= 0", func(t *testing.T) {
ctx := context.Background()
+ server := NewSyncServer(ctx, &fake.Clientset{}, "", nil)
req := &syncpkg.CreateSyncLimitRequest{
- Name: "test-cm",
+ CmName: "test-cm",
Namespace: "test-ns",
Key: "test-key",
- SizeLimit: 0,
+ Limit: 0,
}
_, err := server.CreateSyncLimit(ctx, req)
@@ -42,11 +42,10 @@ func Test_syncServer_CreateSyncLimit(t *testing.T) {
statusErr, ok := status.FromError(err)
require.True(t, ok)
require.Equal(t, codes.InvalidArgument, statusErr.Code())
- require.Contains(t, statusErr.Message(), "size limit must be greater than zero")
+ require.Contains(t, statusErr.Message(), "limit must be greater than zero")
})
t.Run("Error creating ConfigMap", func(t *testing.T) {
- server := NewSyncServer()
kubeClient := fake.NewSimpleClientset()
kubeClient.PrependReactor("create", "configmaps", func(action ktesting.Action) (bool, runtime.Object, error) {
@@ -58,12 +57,13 @@ func Test_syncServer_CreateSyncLimit(t *testing.T) {
})
ctx := context.WithValue(context.Background(), auth.KubeKey, kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.CreateSyncLimitRequest{
- Name: "test-cm",
+ CmName: "test-cm",
Namespace: "non-existent-ns",
Key: "test-key",
- SizeLimit: 100,
+ Limit: 100,
}
_, err := server.CreateSyncLimit(ctx, req)
@@ -76,29 +76,27 @@ func Test_syncServer_CreateSyncLimit(t *testing.T) {
})
t.Run("Create new ConfigMap", func(t *testing.T) {
- server := NewSyncServer()
kubeClient := fake.NewSimpleClientset()
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.CreateSyncLimitRequest{
- Name: "test-cm",
+ CmName: "test-cm",
Namespace: "test-ns",
Key: "test-key",
- SizeLimit: 100,
+ Limit: 100,
}
resp, err := server.CreateSyncLimit(ctx, req)
require.NoError(t, err)
- require.Equal(t, "test-cm", resp.Name)
+ require.Equal(t, "test-cm", resp.CmName)
require.Equal(t, "test-ns", resp.Namespace)
require.Equal(t, "test-key", resp.Key)
- require.Equal(t, int32(100), resp.SizeLimit)
+ require.Equal(t, int32(100), resp.Limit)
})
t.Run("ConfigMap already exists", func(t *testing.T) {
- server := NewSyncServer()
-
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "existing-cm",
@@ -110,25 +108,25 @@ func Test_syncServer_CreateSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.CreateSyncLimitRequest{
- Name: "existing-cm",
+ CmName: "existing-cm",
Namespace: "test-ns",
Key: "new-key",
- SizeLimit: 200,
+ Limit: 200,
}
resp, err := server.CreateSyncLimit(ctx, req)
require.NoError(t, err)
- require.Equal(t, "existing-cm", resp.Name)
+ require.Equal(t, "existing-cm", resp.CmName)
require.Equal(t, "test-ns", resp.Namespace)
require.Equal(t, "new-key", resp.Key)
- require.Equal(t, int32(200), resp.SizeLimit)
+ require.Equal(t, int32(200), resp.Limit)
})
t.Run("ConfigMap exists with nil Data", func(t *testing.T) {
- server := NewSyncServer()
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -139,31 +137,62 @@ func Test_syncServer_CreateSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.CreateSyncLimitRequest{
- Name: "nil-data-cm",
+ CmName: "nil-data-cm",
Namespace: "test-ns",
Key: "test-key",
- SizeLimit: 300,
+ Limit: 300,
}
resp, err := server.CreateSyncLimit(ctx, req)
require.NoError(t, err)
- require.Equal(t, "nil-data-cm", resp.Name)
+ require.Equal(t, "nil-data-cm", resp.CmName)
require.Equal(t, "test-key", resp.Key)
- require.Equal(t, int32(300), resp.SizeLimit)
+ require.Equal(t, int32(300), resp.Limit)
+ })
+
+ t.Run("ConfigMap exists with existing key", func(t *testing.T) {
+ existingCM := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "existing-cm",
+ Namespace: "test-ns",
+ },
+ Data: map[string]string{
+ "existing-key": "50",
+ },
+ }
+ kubeClient := fake.NewSimpleClientset(existingCM)
+ ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
+
+ req := &syncpkg.CreateSyncLimitRequest{
+ CmName: "existing-cm",
+ Namespace: "test-ns",
+ Key: "existing-key",
+ Limit: 400,
+ }
+
+ _, err := server.CreateSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ statusErr, ok := status.FromError(err)
+ require.True(t, ok)
+ require.Equal(t, codes.AlreadyExists, statusErr.Code())
+ require.Contains(t, statusErr.Message(), "sync limit cannot be created as it already exists")
})
}
func Test_syncServer_GetSyncLimit(t *testing.T) {
t.Run("ConfigMap doesn't exist", func(t *testing.T) {
- server := NewSyncServer()
kubeClient := fake.NewSimpleClientset()
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.GetSyncLimitRequest{
- Name: "non-existent-cm",
+ CmName: "non-existent-cm",
Namespace: "test-ns",
Key: "test-key",
}
@@ -178,7 +207,6 @@ func Test_syncServer_GetSyncLimit(t *testing.T) {
})
t.Run("Key doesn't exist", func(t *testing.T) {
- server := NewSyncServer()
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -191,9 +219,10 @@ func Test_syncServer_GetSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.GetSyncLimitRequest{
- Name: "existing-cm",
+ CmName: "existing-cm",
Namespace: "test-ns",
Key: "non-existent-key",
}
@@ -208,7 +237,6 @@ func Test_syncServer_GetSyncLimit(t *testing.T) {
})
t.Run("Invalid size limit format", func(t *testing.T) {
- server := NewSyncServer()
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -221,9 +249,10 @@ func Test_syncServer_GetSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.GetSyncLimitRequest{
- Name: "existing-cm",
+ CmName: "existing-cm",
Namespace: "test-ns",
Key: "invalid-key",
}
@@ -234,12 +263,10 @@ func Test_syncServer_GetSyncLimit(t *testing.T) {
statusErr, ok := status.FromError(err)
require.True(t, ok)
require.Equal(t, codes.InvalidArgument, statusErr.Code())
- require.Contains(t, statusErr.Message(), "invalid size limit format")
+ require.Contains(t, statusErr.Message(), "invalid limit format")
})
t.Run("Successfully get sync limit", func(t *testing.T) {
- server := NewSyncServer()
-
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "existing-cm",
@@ -251,9 +278,10 @@ func Test_syncServer_GetSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.GetSyncLimitRequest{
- Name: "existing-cm",
+ CmName: "existing-cm",
Namespace: "test-ns",
Key: "valid-key",
}
@@ -261,23 +289,23 @@ func Test_syncServer_GetSyncLimit(t *testing.T) {
resp, err := server.GetSyncLimit(ctx, req)
require.NoError(t, err)
- require.Equal(t, "existing-cm", resp.Name)
+ require.Equal(t, "existing-cm", resp.CmName)
require.Equal(t, "test-ns", resp.Namespace)
require.Equal(t, "valid-key", resp.Key)
- require.Equal(t, int32(500), resp.SizeLimit)
+ require.Equal(t, int32(500), resp.Limit)
})
}
func Test_syncServer_UpdateSyncLimit(t *testing.T) {
- t.Run("SizeLimit <= 0", func(t *testing.T) {
- server := NewSyncServer()
+ t.Run("Limit <= 0", func(t *testing.T) {
ctx := context.Background()
+ server := NewSyncServer(ctx, fake.NewClientset(), "", nil)
req := &syncpkg.UpdateSyncLimitRequest{
- Name: "test-cm",
+ CmName: "test-cm",
Namespace: "test-ns",
Key: "test-key",
- SizeLimit: 0,
+ Limit: 0,
}
_, err := server.UpdateSyncLimit(ctx, req)
@@ -286,19 +314,19 @@ func Test_syncServer_UpdateSyncLimit(t *testing.T) {
statusErr, ok := status.FromError(err)
require.True(t, ok)
require.Equal(t, codes.InvalidArgument, statusErr.Code())
- require.Contains(t, statusErr.Message(), "size limit must be greater than zero")
+ require.Contains(t, statusErr.Message(), "limit must be greater than zero")
})
t.Run("ConfigMap doesn't exist", func(t *testing.T) {
- server := NewSyncServer()
kubeClient := fake.NewSimpleClientset()
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.UpdateSyncLimitRequest{
- Name: "non-existent-cm",
+ CmName: "non-existent-cm",
Namespace: "test-ns",
Key: "test-key",
- SizeLimit: 100,
+ Limit: 100,
}
_, err := server.UpdateSyncLimit(ctx, req)
@@ -311,8 +339,6 @@ func Test_syncServer_UpdateSyncLimit(t *testing.T) {
})
t.Run("ConfigMap with nil Data", func(t *testing.T) {
- server := NewSyncServer()
-
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "nil-data-cm",
@@ -322,12 +348,13 @@ func Test_syncServer_UpdateSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.UpdateSyncLimitRequest{
- Name: "nil-data-cm",
+ CmName: "nil-data-cm",
Namespace: "test-ns",
Key: "test-key",
- SizeLimit: 200,
+ Limit: 200,
}
_, err := server.UpdateSyncLimit(ctx, req)
@@ -340,7 +367,6 @@ func Test_syncServer_UpdateSyncLimit(t *testing.T) {
})
t.Run("Key doesn't exist", func(t *testing.T) {
- server := NewSyncServer()
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -353,12 +379,13 @@ func Test_syncServer_UpdateSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.UpdateSyncLimitRequest{
- Name: "existing-cm",
+ CmName: "existing-cm",
Namespace: "test-ns",
Key: "non-existent-key",
- SizeLimit: 200,
+ Limit: 200,
}
_, err := server.UpdateSyncLimit(ctx, req)
@@ -371,8 +398,6 @@ func Test_syncServer_UpdateSyncLimit(t *testing.T) {
})
t.Run("Error updating ConfigMap", func(t *testing.T) {
- server := NewSyncServer()
-
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "existing-cm",
@@ -389,12 +414,13 @@ func Test_syncServer_UpdateSyncLimit(t *testing.T) {
})
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.UpdateSyncLimitRequest{
- Name: "existing-cm",
+ CmName: "existing-cm",
Namespace: "test-ns",
Key: "existing-key",
- SizeLimit: 200,
+ Limit: 200,
}
_, err := server.UpdateSyncLimit(ctx, req)
@@ -407,8 +433,6 @@ func Test_syncServer_UpdateSyncLimit(t *testing.T) {
})
t.Run("Successfully update sync limit", func(t *testing.T) {
- server := NewSyncServer()
-
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "existing-cm",
@@ -420,32 +444,33 @@ func Test_syncServer_UpdateSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.UpdateSyncLimitRequest{
- Name: "existing-cm",
+ CmName: "existing-cm",
Namespace: "test-ns",
Key: "existing-key",
- SizeLimit: 300,
+ Limit: 300,
}
resp, err := server.UpdateSyncLimit(ctx, req)
require.NoError(t, err)
- require.Equal(t, "existing-cm", resp.Name)
+ require.Equal(t, "existing-cm", resp.CmName)
require.Equal(t, "test-ns", resp.Namespace)
require.Equal(t, "existing-key", resp.Key)
- require.Equal(t, int32(300), resp.SizeLimit)
+ require.Equal(t, int32(300), resp.Limit)
})
}
func Test_syncServer_DeleteSyncLimit(t *testing.T) {
t.Run("ConfigMap doesn't exist", func(t *testing.T) {
- server := NewSyncServer()
kubeClient := fake.NewSimpleClientset()
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.DeleteSyncLimitRequest{
- Name: "non-existent-cm",
+ CmName: "non-existent-cm",
Namespace: "test-ns",
Key: "test-key",
}
@@ -460,8 +485,6 @@ func Test_syncServer_DeleteSyncLimit(t *testing.T) {
})
t.Run("ConfigMap with nil Data", func(t *testing.T) {
- server := NewSyncServer()
-
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "nil-data-cm",
@@ -471,9 +494,10 @@ func Test_syncServer_DeleteSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.DeleteSyncLimitRequest{
- Name: "nil-data-cm",
+ CmName: "nil-data-cm",
Namespace: "test-ns",
Key: "test-key",
}
@@ -484,8 +508,6 @@ func Test_syncServer_DeleteSyncLimit(t *testing.T) {
})
t.Run("ConfigMap with empty Data", func(t *testing.T) {
- server := NewSyncServer()
-
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "empty-data-cm",
@@ -495,9 +517,10 @@ func Test_syncServer_DeleteSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.DeleteSyncLimitRequest{
- Name: "empty-data-cm",
+ CmName: "empty-data-cm",
Namespace: "test-ns",
Key: "test-key",
}
@@ -508,8 +531,6 @@ func Test_syncServer_DeleteSyncLimit(t *testing.T) {
})
t.Run("Error updating ConfigMap", func(t *testing.T) {
- server := NewSyncServer()
-
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "existing-cm",
@@ -526,9 +547,10 @@ func Test_syncServer_DeleteSyncLimit(t *testing.T) {
})
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.DeleteSyncLimitRequest{
- Name: "existing-cm",
+ CmName: "existing-cm",
Namespace: "test-ns",
Key: "existing-key",
}
@@ -543,8 +565,6 @@ func Test_syncServer_DeleteSyncLimit(t *testing.T) {
})
t.Run("Successfully delete sync limit", func(t *testing.T) {
- server := NewSyncServer()
-
existingCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "existing-cm",
@@ -557,9 +577,10 @@ func Test_syncServer_DeleteSyncLimit(t *testing.T) {
}
kubeClient := fake.NewSimpleClientset(existingCM)
ctx := withKubeClient(kubeClient)
+ server := NewSyncServer(ctx, kubeClient, "", nil)
req := &syncpkg.DeleteSyncLimitRequest{
- Name: "existing-cm",
+ CmName: "existing-cm",
Namespace: "test-ns",
Key: "key1",
}
diff --git a/server/sync/sync_server_db_test.go b/server/sync/sync_server_db_test.go
new file mode 100644
index 000000000000..6572c6534b00
--- /dev/null
+++ b/server/sync/sync_server_db_test.go
@@ -0,0 +1,226 @@
+package sync
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ authorizationv1 "k8s.io/api/authorization/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ kubefake "k8s.io/client-go/kubernetes/fake"
+ k8stesting "k8s.io/client-go/testing"
+
+ "github.com/upper/db/v4"
+
+ syncpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/sync"
+ "github.com/argoproj/argo-workflows/v3/server/auth"
+ "github.com/argoproj/argo-workflows/v3/util/logging"
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
+ syncdbmocks "github.com/argoproj/argo-workflows/v3/util/sync/db/mocks"
+)
+
+// TestDBSyncProvider exercises the database-backed sync-limit provider's CRUD
+// paths (create/get/update/delete) using a mocked SyncQueries layer and a fake
+// Kubernetes client whose access-review reactors simulate RBAC allow/deny.
+func TestDBSyncProvider(t *testing.T) {
+ mockSyncQueries := &syncdbmocks.SyncQueries{}
+ provider := &dbSyncProvider{db: mockSyncQueries}
+ // The server routes DATABASE-typed requests to the provider under test;
+ // only the final Create case below goes through this routing layer.
+ server := &syncServer{
+ providers: map[syncpkg.SyncConfigType]SyncConfigProvider{
+ syncpkg.SyncConfigType_DATABASE: provider,
+ },
+ }
+
+ kubeClient := &kubefake.Clientset{}
+ // allowed is flipped by the subtests below; both reactors read it, so this
+ // single flag controls whether the simulated RBAC check passes.
+ allowed := true
+
+ // Reactor for SelfSubjectAccessReview: reports Allowed per the flag above.
+ kubeClient.AddReactor("create", "selfsubjectaccessreviews", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+ return true, &authorizationv1.SelfSubjectAccessReview{
+ Status: authorizationv1.SubjectAccessReviewStatus{Allowed: allowed},
+ }, nil
+ })
+
+ // Reactor for SelfSubjectRulesReview: returns one (empty) resource rule
+ // when allowed, and no rules otherwise.
+ kubeClient.AddReactor("create", "selfsubjectrulesreviews", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+ var rules []authorizationv1.ResourceRule
+ if allowed {
+ rules = append(rules, authorizationv1.ResourceRule{})
+ }
+ return true, &authorizationv1.SelfSubjectRulesReview{
+ Status: authorizationv1.SubjectRulesReviewStatus{
+ ResourceRules: rules,
+ },
+ }, nil
+ })
+
+ // Context carries the fake kube client under auth.KubeKey — presumably the
+ // provider's auth check retrieves it from there (NOTE(review): confirm
+ // against the provider implementation).
+ ctx := context.WithValue(logging.TestContext(t.Context()), auth.KubeKey, kubeClient)
+
+ // Create: denied RBAC -> PermissionDenied; lookup error -> Internal;
+ // record already present -> AlreadyExists; ErrNoMoreRows -> proceed to
+ // CreateSemaphoreLimit (failure -> Internal, success -> populated response).
+ t.Run("CreateSyncLimit", func(t *testing.T) {
+ req := &syncpkg.CreateSyncLimitRequest{
+ Type: syncpkg.SyncConfigType_DATABASE,
+ Namespace: "test-ns",
+ Key: "test-name",
+ Limit: 5,
+ }
+
+ allowed = false
+ resp, err := provider.createSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.PermissionDenied, status.Code(err))
+
+ allowed = true
+ mockSyncQueries.On("GetSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(nil, assert.AnError).Once()
+ resp, err = provider.createSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.Internal, status.Code(err))
+
+ mockSyncQueries.On("GetSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(&syncdb.LimitRecord{}, nil).Once()
+ resp, err = provider.createSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.AlreadyExists, status.Code(err))
+
+ mockSyncQueries.On("GetSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(nil, db.ErrNoMoreRows).Once()
+ mockSyncQueries.On("CreateSemaphoreLimit", mock.Anything, "test-ns/test-name", 5).Return(assert.AnError).Once()
+ resp, err = provider.createSyncLimit(ctx, req)
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.Internal, status.Code(err))
+
+ mockSyncQueries.On("GetSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(nil, db.ErrNoMoreRows).Once()
+ mockSyncQueries.On("CreateSemaphoreLimit", mock.Anything, "test-ns/test-name", 5).Return(nil).Once()
+
+ // Happy path goes through server.CreateSyncLimit to also cover the
+ // providers-map routing, unlike the direct provider calls above.
+ resp, err = server.CreateSyncLimit(ctx, req)
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, "test-name", resp.Key)
+ require.Equal(t, "test-ns", resp.Namespace)
+ require.Equal(t, int32(5), resp.Limit)
+ })
+
+ // Get: denied RBAC -> PermissionDenied; ErrNoMoreRows -> NotFound; other
+ // lookup error -> Internal; found record -> response mirrors its SizeLimit.
+ t.Run("GetSyncLimit", func(t *testing.T) {
+ req := &syncpkg.GetSyncLimitRequest{
+ Type: syncpkg.SyncConfigType_DATABASE,
+ Namespace: "test-ns",
+ Key: "test-name",
+ }
+
+ allowed = false
+ resp, err := provider.getSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.PermissionDenied, status.Code(err))
+
+ allowed = true
+
+ mockSyncQueries.On("GetSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(nil, db.ErrNoMoreRows).Once()
+ resp, err = provider.getSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.NotFound, status.Code(err))
+
+ mockSyncQueries.On("GetSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(nil, assert.AnError).Once()
+ resp, err = provider.getSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.Internal, status.Code(err))
+
+ mockSyncQueries.On("GetSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(&syncdb.LimitRecord{
+ SizeLimit: 5,
+ }, nil).Once()
+
+ resp, err = provider.getSyncLimit(ctx, req)
+
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, "test-name", resp.Key)
+ require.Equal(t, "test-ns", resp.Namespace)
+ require.Equal(t, int32(5), resp.Limit)
+ })
+
+ // Update: denied RBAC -> PermissionDenied; ErrNoMoreRows from the update
+ // -> NotFound; other error -> Internal; success echoes the new limit.
+ t.Run("UpdateSyncLimit", func(t *testing.T) {
+ allowed = false
+
+ req := &syncpkg.UpdateSyncLimitRequest{
+ Type: syncpkg.SyncConfigType_DATABASE,
+ Namespace: "test-ns",
+ Key: "test-name",
+ Limit: 10,
+ }
+ resp, err := provider.updateSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.PermissionDenied, status.Code(err))
+
+ allowed = true
+
+ mockSyncQueries.On("UpdateSemaphoreLimit", mock.Anything, "test-ns/test-name", 10).Return(db.ErrNoMoreRows).Once()
+ resp, err = provider.updateSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.NotFound, status.Code(err))
+
+ mockSyncQueries.On("UpdateSemaphoreLimit", mock.Anything, "test-ns/test-name", 10).Return(assert.AnError).Once()
+ resp, err = provider.updateSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.Internal, status.Code(err))
+
+ mockSyncQueries.On("UpdateSemaphoreLimit", mock.Anything, "test-ns/test-name", 10).Return(nil).Once()
+ resp, err = provider.updateSyncLimit(ctx, req)
+
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, "test-name", resp.Key)
+ require.Equal(t, "test-ns", resp.Namespace)
+ require.Equal(t, int32(10), resp.Limit)
+ })
+
+ // Delete: denied RBAC -> PermissionDenied; ErrNoMoreRows -> NotFound;
+ // other error -> Internal; success returns a non-nil (empty) response.
+ t.Run("DeleteSyncLimit", func(t *testing.T) {
+ allowed = false
+
+ req := &syncpkg.DeleteSyncLimitRequest{
+ Type: syncpkg.SyncConfigType_DATABASE,
+ Namespace: "test-ns",
+ Key: "test-name",
+ }
+ resp, err := provider.deleteSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.PermissionDenied, status.Code(err))
+
+ allowed = true
+
+ mockSyncQueries.On("DeleteSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(db.ErrNoMoreRows).Once()
+ resp, err = provider.deleteSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.NotFound, status.Code(err))
+
+ mockSyncQueries.On("DeleteSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(assert.AnError).Once()
+ resp, err = provider.deleteSyncLimit(ctx, req)
+
+ require.Error(t, err)
+ require.Nil(t, resp)
+ require.Equal(t, codes.Internal, status.Code(err))
+
+ mockSyncQueries.On("DeleteSemaphoreLimit", mock.Anything, "test-ns/test-name").Return(nil).Once()
+ resp, err = provider.deleteSyncLimit(ctx, req)
+
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ })
+}
diff --git a/test/e2e/argo_server_test.go b/test/e2e/argo_server_test.go
index 84a4fb6d2c80..35b50040b17c 100644
--- a/test/e2e/argo_server_test.go
+++ b/test/e2e/argo_server_test.go
@@ -2613,92 +2613,96 @@ spec:
}
-func (s *ArgoServerSuite) TestSyncService() {
+func (s *ArgoServerSuite) TestSyncConfigmapService() {
syncNamespace := "argo"
configmapName := "test-sync-cm"
syncKey := "test-key"
- s.Run("CreateSyncLimit", func() {
+ s.Run("CreateSyncLimitConfigmap", func() {
s.e().POST("/api/v1/sync/{namespace}", syncNamespace).
WithJSON(syncpkg.CreateSyncLimitRequest{
- Name: configmapName,
- Key: syncKey,
- SizeLimit: 100,
+ CmName: configmapName,
+ Key: syncKey,
+ Limit: 100,
+ Type: syncpkg.SyncConfigType_CONFIGMAP,
}).
Expect().
Status(200).
JSON().Object().
- HasValue("name", configmapName).
+ HasValue("cmName", configmapName).
HasValue("key", syncKey).
- HasValue("sizeLimit", 100)
+ HasValue("limit", 100)
})
s.Run("CreateSyncLimit-cm-exist", func() {
s.e().POST("/api/v1/sync/{namespace}", syncNamespace).
WithJSON(syncpkg.CreateSyncLimitRequest{
- Name: configmapName,
- Key: syncKey + "-exist",
- SizeLimit: 100,
+ CmName: configmapName,
+ Key: syncKey + "-exist",
+ Limit: 100,
+ Type: syncpkg.SyncConfigType_CONFIGMAP,
}).
Expect().
Status(200).
JSON().Object().
- HasValue("name", configmapName).
+ HasValue("cmName", configmapName).
HasValue("key", syncKey+"-exist").
- HasValue("sizeLimit", 100)
+ HasValue("limit", 100)
})
- s.Run("GetSyncLimit", func() {
+ s.Run("GetSyncLimitConfigmap", func() {
s.e().GET("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey).
- WithQuery("name", configmapName).
+ WithQuery("cmName", configmapName).
Expect().
Status(200).
JSON().Object().
- HasValue("name", configmapName).
+ HasValue("cmName", configmapName).
HasValue("key", syncKey).
- HasValue("sizeLimit", 100)
+ HasValue("limit", 100)
})
- s.Run("UpdateSyncLimit", func() {
+ s.Run("UpdateSyncLimitConfigmap", func() {
s.e().PUT("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey).
WithJSON(syncpkg.UpdateSyncLimitRequest{
- Name: configmapName,
- SizeLimit: 200,
+ CmName: configmapName,
+ Limit: 200,
+ Type: syncpkg.SyncConfigType_CONFIGMAP,
}).
Expect().
Status(200).
JSON().Object().
- HasValue("name", configmapName).
+ HasValue("cmName", configmapName).
HasValue("key", syncKey).
- HasValue("sizeLimit", 200)
+ HasValue("limit", 200)
})
s.Run("InvalidSizeLimit", func() {
s.e().POST("/api/v1/sync/{namespace}", syncNamespace).
WithJSON(syncpkg.CreateSyncLimitRequest{
- Name: configmapName + "-invalid",
- Key: syncKey,
- SizeLimit: 0,
+ CmName: configmapName + "-invalid",
+ Key: syncKey,
+ Limit: 0,
+ Type: syncpkg.SyncConfigType_CONFIGMAP,
}).
Expect().
Status(400)
})
- s.Run("KeyDoesNotExist", func() {
+ s.Run("KeyDoesNotExistConfigmap", func() {
s.e().GET("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey+"-non-existent").
- WithQuery("name", configmapName).
+ WithQuery("cmName", configmapName).
Expect().
Status(404)
})
- s.Run("DeleteSyncLimit", func() {
+ s.Run("DeleteSyncLimitConfigmap", func() {
s.e().DELETE("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey).
- WithQuery("name", configmapName).
+ WithQuery("cmName", configmapName).
Expect().
Status(200)
s.e().GET("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey).
- WithQuery("name", configmapName).
+ WithQuery("cmName", configmapName).
Expect().
Status(404)
})
@@ -2706,8 +2710,104 @@ func (s *ArgoServerSuite) TestSyncService() {
s.Run("UpdateNonExistentLimit", func() {
s.e().PUT("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey+"-non-existent").
WithJSON(syncpkg.UpdateSyncLimitRequest{
- Name: configmapName,
- SizeLimit: 200,
+ CmName: configmapName,
+ Limit: 200,
+ Type: syncpkg.SyncConfigType_CONFIGMAP,
+ }).Expect().
+ Status(404)
+ })
+}
+
+func (s *ArgoServerSuite) TestSyncDatabaseService() {
+ syncNamespace := "argo"
+ syncKey := "test-sync-db"
+
+ s.Run("CreateSyncLimitDatabase", func() {
+ s.e().POST("/api/v1/sync/{namespace}", syncNamespace).
+ WithJSON(syncpkg.CreateSyncLimitRequest{
+ Key: syncKey,
+ Limit: 100,
+ Type: syncpkg.SyncConfigType_DATABASE,
+ }).
+ Expect().
+ Status(200).
+ JSON().Object().
+ HasValue("key", syncKey).
+ HasValue("namespace", syncNamespace).
+ HasValue("limit", 100)
+ })
+
+ s.Run("CreateSyncLimitDatabaseAgain", func() {
+ s.e().POST("/api/v1/sync/{namespace}", syncNamespace).
+ WithJSON(syncpkg.CreateSyncLimitRequest{
+ Key: syncKey,
+ Limit: 100,
+ Type: syncpkg.SyncConfigType_DATABASE,
+ }).
+ Expect().
+ Status(409)
+ })
+
+ s.Run("GetSyncLimitDatabase", func() {
+ s.e().GET("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey).
+ WithQuery("type", int(syncpkg.SyncConfigType_DATABASE)).
+ Expect().
+ Status(200).
+ JSON().Object().
+ HasValue("key", syncKey).
+ HasValue("namespace", syncNamespace).
+ HasValue("limit", 100)
+ })
+
+ s.Run("UpdateSyncLimitDatabase", func() {
+ s.e().PUT("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey).
+ WithJSON(syncpkg.UpdateSyncLimitRequest{
+ Limit: 200,
+ Type: syncpkg.SyncConfigType_DATABASE,
+ }).
+ Expect().
+ Status(200).
+ JSON().Object().
+ HasValue("key", syncKey).
+ HasValue("namespace", syncNamespace).
+ HasValue("limit", 200)
+ })
+
+ s.Run("InvalidSizeLimitDatabase", func() {
+ s.e().POST("/api/v1/sync/{namespace}", syncNamespace).
+ WithJSON(syncpkg.CreateSyncLimitRequest{
+ Key: syncKey + "-invalid",
+ Limit: 0,
+ Type: syncpkg.SyncConfigType_DATABASE,
+ }).
+ Expect().
+ Status(400)
+ })
+
+ s.Run("KeyDoesNotExistDatabase", func() {
+ s.e().GET("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey+"-non-existent").
+ WithQuery("type", int(syncpkg.SyncConfigType_DATABASE)).
+ Expect().
+ Status(404)
+ })
+
+ s.Run("DeleteSyncLimitDatabase", func() {
+ s.e().DELETE("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey).
+ WithQuery("type", int(syncpkg.SyncConfigType_DATABASE)).
+ Expect().
+ Status(200)
+
+ s.e().GET("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey).
+ WithQuery("type", int(syncpkg.SyncConfigType_DATABASE)).
+ Expect().
+ Status(404)
+ })
+
+ s.Run("UpdateNonExistentLimitDatabase", func() {
+ s.e().PUT("/api/v1/sync/{namespace}/{key}", syncNamespace, syncKey+"-non-existent").
+ WithJSON(syncpkg.UpdateSyncLimitRequest{
+ Limit: 200,
+ Type: syncpkg.SyncConfigType_DATABASE,
}).Expect().
Status(404)
})
diff --git a/test/e2e/cli_test.go b/test/e2e/cli_test.go
index 918ef89437e6..cdd10ccdf0f6 100644
--- a/test/e2e/cli_test.go
+++ b/test/e2e/cli_test.go
@@ -1817,42 +1817,91 @@ func (s *CLISuite) TestArchive() {
})
}
-func (s *CLISuite) TestSyncCLI() {
+func (s *CLISuite) TestConfigMapSyncCLI() {
s.Given().
- RunCli([]string{"sync", "configmap", "create", "test-sync-configmap", "--key", "test-key", "--size-limit", "1000"}, func(t *testing.T, output string, err error) {
+ RunCli([]string{"sync", "create", "test-key", "--type", "configmap", "--cm-name", "test-sync-configmap", "--limit", "1000"}, func(t *testing.T, output string, err error) {
require.NoError(t, err)
- assert.Contains(t, output, "Configmap sync limit created")
- assert.Contains(t, output, "key test-key")
- assert.Contains(t, output, "size limit 1000")
+ assert.Contains(t, output, "Sync limit created")
+ assert.Contains(t, output, "Key: test-key")
+ assert.Contains(t, output, "Type: configmap")
+ assert.Contains(t, output, "ConfigMap Name: test-sync-configmap")
+ assert.Contains(t, output, "Namespace: argo")
+ assert.Contains(t, output, "Limit: 1000")
})
s.Run("Get ConfigMap sync config", func() {
s.Given().
- RunCli([]string{"sync", "configmap", "get", "test-sync-configmap", "--key", "test-key"}, func(t *testing.T, output string, err error) {
+ RunCli([]string{"sync", "get", "test-key", "--type", "configmap", "--cm-name", "test-sync-configmap"}, func(t *testing.T, output string, err error) {
require.NoError(t, err)
- assert.Contains(t, output, "Sync Configmap name: test-sync-configmap")
+ assert.Contains(t, output, "Key: test-key")
+ assert.Contains(t, output, "Type: configmap")
+ assert.Contains(t, output, "ConfigMap Name: test-sync-configmap")
assert.Contains(t, output, "Namespace: argo")
- assert.Contains(t, output, "Size Limit: 1000")
+ assert.Contains(t, output, "Limit: 1000")
})
})
s.Run("Update ConfigMap sync configs", func() {
s.Given().
- RunCli([]string{"sync", "configmap", "update", "test-sync-configmap", "--key", "test-key", "--size-limit", "2000"}, func(t *testing.T, output string, err error) {
+ RunCli([]string{"sync", "update", "test-key", "--type", "configmap", "--cm-name", "test-sync-configmap", "--limit", "2000"}, func(t *testing.T, output string, err error) {
require.NoError(t, err)
- assert.Contains(t, output, "Updated sync limit for ConfigMap test-sync-configmap")
- assert.Contains(t, output, "key test-key")
- assert.Contains(t, output, "size limit 2000")
+ assert.Contains(t, output, "Key: test-key")
+ assert.Contains(t, output, "Type: configmap")
+ assert.Contains(t, output, "ConfigMap Name: test-sync-configmap")
+ assert.Contains(t, output, "Namespace: argo")
+ assert.Contains(t, output, "Limit: 2000")
})
})
s.Run("Delete ConfigMap sync config", func() {
s.Given().
- RunCli([]string{"sync", "configmap", "delete", "test-sync-configmap", "--key", "test-key"}, func(t *testing.T, output string, err error) {
+ RunCli([]string{"sync", "delete", "test-key", "--type", "configmap", "--cm-name", "test-sync-configmap"}, func(t *testing.T, output string, err error) {
+ require.NoError(t, err)
+ assert.Contains(t, output, "Sync limit deleted")
+ })
+ })
+
+}
+
+func (s *CLISuite) TestDBSyncCLI() {
+ s.Given().
+ RunCli([]string{"sync", "create", "test-db-limit-key", "--type", "database", "--limit", "1000"}, func(t *testing.T, output string, err error) {
+ require.NoError(t, err)
+ assert.Contains(t, output, "Sync limit created")
+ assert.Contains(t, output, "Key: test-db-limit-key")
+ assert.Contains(t, output, "Type: database")
+ assert.Contains(t, output, "Namespace: argo")
+ assert.Contains(t, output, "Limit: 1000")
+ })
+
+ s.Run("Get Database sync config", func() {
+ s.Given().
+ RunCli([]string{"sync", "get", "test-db-limit-key", "--type", "database"}, func(t *testing.T, output string, err error) {
+ require.NoError(t, err)
+ assert.Contains(t, output, "Key: test-db-limit-key")
+ assert.Contains(t, output, "Type: database")
+ assert.Contains(t, output, "Namespace: argo")
+ assert.Contains(t, output, "Limit: 1000")
+ })
+ })
+
+ s.Run("Update Database sync configs", func() {
+ s.Given().
+ RunCli([]string{"sync", "update", "test-db-limit-key", "--type", "database", "--limit", "2000"}, func(t *testing.T, output string, err error) {
+ require.NoError(t, err)
+ assert.Contains(t, output, "Sync limit updated")
+ assert.Contains(t, output, "Key: test-db-limit-key")
+ assert.Contains(t, output, "Type: database")
+ assert.Contains(t, output, "Namespace: argo")
+ assert.Contains(t, output, "Limit: 2000")
+ })
+ })
+
+ s.Run("Delete Database sync config", func() {
+ s.Given().
+ RunCli([]string{"sync", "delete", "test-db-limit-key", "--type", "database"}, func(t *testing.T, output string, err error) {
require.NoError(t, err)
- assert.Contains(t, output, "Deleted sync limit for ConfigMap test-sync-configmap")
- assert.Contains(t, output, "argo namespace")
- assert.Contains(t, output, "key test-key")
+ assert.Contains(t, output, "Sync limit deleted")
})
})
diff --git a/workflow/sync/database_util.go b/util/sync/db/config.go
similarity index 59%
rename from workflow/sync/database_util.go
rename to util/sync/db/config.go
index a6a1a8abe605..1f1dd3d47a46 100644
--- a/workflow/sync/database_util.go
+++ b/util/sync/db/config.go
@@ -1,4 +1,4 @@
-package sync
+package db
import (
"context"
@@ -14,24 +14,24 @@ import (
)
type dbConfig struct {
- limitTable string
- stateTable string
- controllerTable string
- lockTable string
- controllerName string
- inactiveControllerTimeout time.Duration
- skipMigration bool
+ LimitTable string
+ StateTable string
+ ControllerTable string
+ LockTable string
+ ControllerName string
+ InactiveControllerTimeout time.Duration
+ SkipMigration bool
}
-type dbInfo struct {
- config dbConfig
- session db.Session
+type DBInfo struct {
+ Config dbConfig
+ Session db.Session
}
const (
- defaultDBPollSeconds = 10
- defaultDBHeartbeatSeconds = 60
- defaultDBInactiveControllerSeconds = 600
+ DefaultDBPollSeconds = 10
+ DefaultDBHeartbeatSeconds = 60
+ DefaultDBInactiveControllerSeconds = 600
defaultLimitTableName = "sync_limit"
defaultStateTableName = "sync_state"
@@ -46,7 +46,7 @@ func defaultTable(tableName, defaultName string) string {
return tableName
}
-func secondsToDurationWithDefault(value *int, defaultSeconds int) time.Duration {
+func SecondsToDurationWithDefault(value *int, defaultSeconds int) time.Duration {
dur := time.Duration(defaultSeconds) * time.Second
if value != nil {
dur = time.Duration(*value) * time.Second
@@ -54,18 +54,18 @@ func secondsToDurationWithDefault(value *int, defaultSeconds int) time.Duration
return dur
}
-func (d *dbInfo) migrate(ctx context.Context) {
- if d.session == nil {
+func (d *DBInfo) Migrate(ctx context.Context) {
+ if d.Session == nil {
return
}
logger := logging.RequireLoggerFromContext(ctx)
logger.Info(ctx, "Setting up sync manager database")
- if !d.config.skipMigration {
- err := migrate(ctx, d.session, &d.config)
+ if !d.Config.SkipMigration {
+ err := migrate(ctx, d.Session, &d.Config)
if err != nil {
// Carry on anyway, but database sync locks won't work
logger.WithError(err).Warn(ctx, "cannot initialize semaphore database, database sync locks won't work")
- d.session = nil
+ d.Session = nil
} else {
logger.Info(ctx, "Sync db migration complete")
}
@@ -74,23 +74,23 @@ func (d *dbInfo) migrate(ctx context.Context) {
}
}
-func dbConfigFromConfig(config *config.SyncConfig) dbConfig {
+func DBConfigFromConfig(config *config.SyncConfig) dbConfig {
if config == nil {
return dbConfig{}
}
return dbConfig{
- limitTable: defaultTable(config.LimitTableName, defaultLimitTableName),
- stateTable: defaultTable(config.StateTableName, defaultStateTableName),
- controllerTable: defaultTable(config.ControllerTableName, defaultControllerTableName),
- lockTable: defaultTable(config.LockTableName, defaultLockTableName),
- controllerName: config.ControllerName,
- inactiveControllerTimeout: secondsToDurationWithDefault(config.InactiveControllerSeconds,
- defaultDBInactiveControllerSeconds),
- skipMigration: config.SkipMigration,
+ LimitTable: defaultTable(config.LimitTableName, defaultLimitTableName),
+ StateTable: defaultTable(config.StateTableName, defaultStateTableName),
+ ControllerTable: defaultTable(config.ControllerTableName, defaultControllerTableName),
+ LockTable: defaultTable(config.LockTableName, defaultLockTableName),
+ ControllerName: config.ControllerName,
+ InactiveControllerTimeout: SecondsToDurationWithDefault(config.InactiveControllerSeconds,
+ DefaultDBInactiveControllerSeconds),
+ SkipMigration: config.SkipMigration,
}
}
-func dbSessionFromConfigWithCreds(config *config.SyncConfig, username, password string) db.Session {
+func DBSessionFromConfigWithCreds(config *config.SyncConfig, username, password string) db.Session {
if config == nil {
return nil
}
@@ -102,7 +102,7 @@ func dbSessionFromConfigWithCreds(config *config.SyncConfig, username, password
return dbSession
}
-func dbSessionFromConfig(ctx context.Context, kubectlConfig kubernetes.Interface, namespace string, config *config.SyncConfig) db.Session {
+func DBSessionFromConfig(ctx context.Context, kubectlConfig kubernetes.Interface, namespace string, config *config.SyncConfig) db.Session {
if config == nil {
return nil
}
diff --git a/workflow/sync/migrate.go b/util/sync/db/migrate.go
similarity index 67%
rename from workflow/sync/migrate.go
rename to util/sync/db/migrate.go
index 902c1c596ca6..fb1db67df1d3 100644
--- a/workflow/sync/migrate.go
+++ b/util/sync/db/migrate.go
@@ -1,4 +1,4 @@
-package sync
+package db
import (
"context"
@@ -14,19 +14,19 @@ const (
func migrate(ctx context.Context, session db.Session, config *dbConfig) (err error) {
return sqldb.Migrate(ctx, session, versionTable, []sqldb.Change{
- sqldb.AnsiSQLChange(`create table if not exists ` + config.limitTable + ` (
+ sqldb.AnsiSQLChange(`create table if not exists ` + config.LimitTable + ` (
name varchar(256) not null,
sizelimit int,
primary key (name)
)`),
- sqldb.AnsiSQLChange(`create unique index ilimit_name on ` + config.limitTable + ` (name)`),
- sqldb.AnsiSQLChange(`create table if not exists ` + config.controllerTable + ` (
+ sqldb.AnsiSQLChange(`create unique index ilimit_name on ` + config.LimitTable + ` (name)`),
+ sqldb.AnsiSQLChange(`create table if not exists ` + config.ControllerTable + ` (
controller varchar(64) not null,
time timestamp,
primary key (controller)
)`),
- sqldb.AnsiSQLChange(`create unique index icontroller_name on ` + config.controllerTable + ` (controller)`),
- sqldb.AnsiSQLChange(`create table if not exists ` + config.stateTable + ` (
+ sqldb.AnsiSQLChange(`create unique index icontroller_name on ` + config.ControllerTable + ` (controller)`),
+ sqldb.AnsiSQLChange(`create table if not exists ` + config.StateTable + ` (
name varchar(256),
workflowkey varchar(256),
controller varchar(64) not null,
@@ -35,16 +35,16 @@ func migrate(ctx context.Context, session db.Session, config *dbConfig) (err err
time timestamp,
primary key(name, workflowkey, controller)
)`),
- sqldb.AnsiSQLChange(`create index istate_name on ` + config.stateTable + ` (name)`),
- sqldb.AnsiSQLChange(`create index istate_workflowkey on ` + config.stateTable + ` (workflowkey)`),
- sqldb.AnsiSQLChange(`create index istate_controller on ` + config.stateTable + ` (controller)`),
- sqldb.AnsiSQLChange(`create index istate_held on ` + config.stateTable + ` (held)`),
- sqldb.AnsiSQLChange(`create table if not exists ` + config.lockTable + ` (
+ sqldb.AnsiSQLChange(`create index istate_name on ` + config.StateTable + ` (name)`),
+ sqldb.AnsiSQLChange(`create index istate_workflowkey on ` + config.StateTable + ` (workflowkey)`),
+ sqldb.AnsiSQLChange(`create index istate_controller on ` + config.StateTable + ` (controller)`),
+ sqldb.AnsiSQLChange(`create index istate_held on ` + config.StateTable + ` (held)`),
+ sqldb.AnsiSQLChange(`create table if not exists ` + config.LockTable + ` (
name varchar(256),
controller varchar(64) not null,
time timestamp,
primary key(name)
)`),
- sqldb.AnsiSQLChange(`create unique index ilock_name on ` + config.lockTable + ` (name)`),
+ sqldb.AnsiSQLChange(`create unique index ilock_name on ` + config.LockTable + ` (name)`),
})
}
diff --git a/util/sync/db/mocks/SyncQueries.go b/util/sync/db/mocks/SyncQueries.go
new file mode 100644
index 000000000000..33d4d35cab88
--- /dev/null
+++ b/util/sync/db/mocks/SyncQueries.go
@@ -0,0 +1,1685 @@
+// Code generated by mockery; DO NOT EDIT.
+// github.com/vektra/mockery
+// template: testify
+
+package mocks
+
+import (
+ "context"
+ "time"
+
+ "github.com/argoproj/argo-workflows/v3/util/sync/db"
+ mock "github.com/stretchr/testify/mock"
+ db0 "github.com/upper/db/v4"
+)
+
+// NewSyncQueries creates a new instance of SyncQueries. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewSyncQueries(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *SyncQueries {
+ mock := &SyncQueries{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
+
+// SyncQueries is an autogenerated mock type for the SyncQueries type
+type SyncQueries struct {
+ mock.Mock
+}
+
+type SyncQueries_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *SyncQueries) EXPECT() *SyncQueries_Expecter {
+ return &SyncQueries_Expecter{mock: &_m.Mock}
+}
+
+// AddToQueue provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) AddToQueue(ctx context.Context, record *db.StateRecord) error {
+ ret := _mock.Called(ctx, record)
+
+ if len(ret) == 0 {
+ panic("no return value specified for AddToQueue")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, *db.StateRecord) error); ok {
+ r0 = returnFunc(ctx, record)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_AddToQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddToQueue'
+type SyncQueries_AddToQueue_Call struct {
+ *mock.Call
+}
+
+// AddToQueue is a helper method to define mock.On call
+// - ctx context.Context
+// - record *db.StateRecord
+func (_e *SyncQueries_Expecter) AddToQueue(ctx interface{}, record interface{}) *SyncQueries_AddToQueue_Call {
+ return &SyncQueries_AddToQueue_Call{Call: _e.mock.On("AddToQueue", ctx, record)}
+}
+
+func (_c *SyncQueries_AddToQueue_Call) Run(run func(ctx context.Context, record *db.StateRecord)) *SyncQueries_AddToQueue_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 *db.StateRecord
+ if args[1] != nil {
+ arg1 = args[1].(*db.StateRecord)
+ }
+ run(
+ arg0,
+ arg1,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_AddToQueue_Call) Return(err error) *SyncQueries_AddToQueue_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_AddToQueue_Call) RunAndReturn(run func(ctx context.Context, record *db.StateRecord) error) *SyncQueries_AddToQueue_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CheckQueueExists provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) CheckQueueExists(ctx context.Context, semaphoreName string, holderKey string, controllerName string) ([]db.StateRecord, error) {
+ ret := _mock.Called(ctx, semaphoreName, holderKey, controllerName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CheckQueueExists")
+ }
+
+ var r0 []db.StateRecord
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string) ([]db.StateRecord, error)); ok {
+ return returnFunc(ctx, semaphoreName, holderKey, controllerName)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string) []db.StateRecord); ok {
+ r0 = returnFunc(ctx, semaphoreName, holderKey, controllerName)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]db.StateRecord)
+ }
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
+ r1 = returnFunc(ctx, semaphoreName, holderKey, controllerName)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_CheckQueueExists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckQueueExists'
+type SyncQueries_CheckQueueExists_Call struct {
+ *mock.Call
+}
+
+// CheckQueueExists is a helper method to define mock.On call
+// - ctx context.Context
+// - semaphoreName string
+// - holderKey string
+// - controllerName string
+func (_e *SyncQueries_Expecter) CheckQueueExists(ctx interface{}, semaphoreName interface{}, holderKey interface{}, controllerName interface{}) *SyncQueries_CheckQueueExists_Call {
+ return &SyncQueries_CheckQueueExists_Call{Call: _e.mock.On("CheckQueueExists", ctx, semaphoreName, holderKey, controllerName)}
+}
+
+func (_c *SyncQueries_CheckQueueExists_Call) Run(run func(ctx context.Context, semaphoreName string, holderKey string, controllerName string)) *SyncQueries_CheckQueueExists_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ var arg3 string
+ if args[3] != nil {
+ arg3 = args[3].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_CheckQueueExists_Call) Return(stateRecords []db.StateRecord, err error) *SyncQueries_CheckQueueExists_Call {
+ _c.Call.Return(stateRecords, err)
+ return _c
+}
+
+func (_c *SyncQueries_CheckQueueExists_Call) RunAndReturn(run func(ctx context.Context, semaphoreName string, holderKey string, controllerName string) ([]db.StateRecord, error)) *SyncQueries_CheckQueueExists_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// CreateSemaphoreLimit provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) CreateSemaphoreLimit(ctx context.Context, name string, sizeLimit int) error {
+ ret := _mock.Called(ctx, name, sizeLimit)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CreateSemaphoreLimit")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string, int) error); ok {
+ r0 = returnFunc(ctx, name, sizeLimit)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_CreateSemaphoreLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateSemaphoreLimit'
+type SyncQueries_CreateSemaphoreLimit_Call struct {
+ *mock.Call
+}
+
+// CreateSemaphoreLimit is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - sizeLimit int
+func (_e *SyncQueries_Expecter) CreateSemaphoreLimit(ctx interface{}, name interface{}, sizeLimit interface{}) *SyncQueries_CreateSemaphoreLimit_Call {
+ return &SyncQueries_CreateSemaphoreLimit_Call{Call: _e.mock.On("CreateSemaphoreLimit", ctx, name, sizeLimit)}
+}
+
+func (_c *SyncQueries_CreateSemaphoreLimit_Call) Run(run func(ctx context.Context, name string, sizeLimit int)) *SyncQueries_CreateSemaphoreLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ var arg2 int
+ if args[2] != nil {
+ arg2 = args[2].(int)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_CreateSemaphoreLimit_Call) Return(err error) *SyncQueries_CreateSemaphoreLimit_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_CreateSemaphoreLimit_Call) RunAndReturn(run func(ctx context.Context, name string, sizeLimit int) error) *SyncQueries_CreateSemaphoreLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteLock provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) DeleteLock(ctx context.Context, lockName string) error {
+ ret := _mock.Called(ctx, lockName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteLock")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = returnFunc(ctx, lockName)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_DeleteLock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteLock'
+type SyncQueries_DeleteLock_Call struct {
+ *mock.Call
+}
+
+// DeleteLock is a helper method to define mock.On call
+// - ctx context.Context
+// - lockName string
+func (_e *SyncQueries_Expecter) DeleteLock(ctx interface{}, lockName interface{}) *SyncQueries_DeleteLock_Call {
+ return &SyncQueries_DeleteLock_Call{Call: _e.mock.On("DeleteLock", ctx, lockName)}
+}
+
+func (_c *SyncQueries_DeleteLock_Call) Run(run func(ctx context.Context, lockName string)) *SyncQueries_DeleteLock_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_DeleteLock_Call) Return(err error) *SyncQueries_DeleteLock_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_DeleteLock_Call) RunAndReturn(run func(ctx context.Context, lockName string) error) *SyncQueries_DeleteLock_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// DeleteSemaphoreLimit provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) DeleteSemaphoreLimit(ctx context.Context, name string) error {
+ ret := _mock.Called(ctx, name)
+
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteSemaphoreLimit")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = returnFunc(ctx, name)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_DeleteSemaphoreLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteSemaphoreLimit'
+type SyncQueries_DeleteSemaphoreLimit_Call struct {
+ *mock.Call
+}
+
+// DeleteSemaphoreLimit is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+func (_e *SyncQueries_Expecter) DeleteSemaphoreLimit(ctx interface{}, name interface{}) *SyncQueries_DeleteSemaphoreLimit_Call {
+ return &SyncQueries_DeleteSemaphoreLimit_Call{Call: _e.mock.On("DeleteSemaphoreLimit", ctx, name)}
+}
+
+func (_c *SyncQueries_DeleteSemaphoreLimit_Call) Run(run func(ctx context.Context, name string)) *SyncQueries_DeleteSemaphoreLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_DeleteSemaphoreLimit_Call) Return(err error) *SyncQueries_DeleteSemaphoreLimit_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_DeleteSemaphoreLimit_Call) RunAndReturn(run func(ctx context.Context, name string) error) *SyncQueries_DeleteSemaphoreLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ExpireInactiveLocks provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) ExpireInactiveLocks(ctx context.Context, inactiveTimeout time.Duration) (int64, error) {
+ ret := _mock.Called(ctx, inactiveTimeout)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ExpireInactiveLocks")
+ }
+
+ var r0 int64
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, time.Duration) (int64, error)); ok {
+ return returnFunc(ctx, inactiveTimeout)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, time.Duration) int64); ok {
+ r0 = returnFunc(ctx, inactiveTimeout)
+ } else {
+ r0 = ret.Get(0).(int64)
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, time.Duration) error); ok {
+ r1 = returnFunc(ctx, inactiveTimeout)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_ExpireInactiveLocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExpireInactiveLocks'
+type SyncQueries_ExpireInactiveLocks_Call struct {
+ *mock.Call
+}
+
+// ExpireInactiveLocks is a helper method to define mock.On call
+// - ctx context.Context
+// - inactiveTimeout time.Duration
+func (_e *SyncQueries_Expecter) ExpireInactiveLocks(ctx interface{}, inactiveTimeout interface{}) *SyncQueries_ExpireInactiveLocks_Call {
+ return &SyncQueries_ExpireInactiveLocks_Call{Call: _e.mock.On("ExpireInactiveLocks", ctx, inactiveTimeout)}
+}
+
+func (_c *SyncQueries_ExpireInactiveLocks_Call) Run(run func(ctx context.Context, inactiveTimeout time.Duration)) *SyncQueries_ExpireInactiveLocks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 time.Duration
+ if args[1] != nil {
+ arg1 = args[1].(time.Duration)
+ }
+ run(
+ arg0,
+ arg1,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_ExpireInactiveLocks_Call) Return(n int64, err error) *SyncQueries_ExpireInactiveLocks_Call {
+ _c.Call.Return(n, err)
+ return _c
+}
+
+func (_c *SyncQueries_ExpireInactiveLocks_Call) RunAndReturn(run func(ctx context.Context, inactiveTimeout time.Duration) (int64, error)) *SyncQueries_ExpireInactiveLocks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetCurrentHolders provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) GetCurrentHolders(ctx context.Context, session db0.Session, semaphoreName string) ([]db.StateRecord, error) {
+ ret := _mock.Called(ctx, session, semaphoreName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetCurrentHolders")
+ }
+
+ var r0 []db.StateRecord
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string) ([]db.StateRecord, error)); ok {
+ return returnFunc(ctx, session, semaphoreName)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string) []db.StateRecord); ok {
+ r0 = returnFunc(ctx, session, semaphoreName)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]db.StateRecord)
+ }
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, db0.Session, string) error); ok {
+ r1 = returnFunc(ctx, session, semaphoreName)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_GetCurrentHolders_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCurrentHolders'
+type SyncQueries_GetCurrentHolders_Call struct {
+ *mock.Call
+}
+
+// GetCurrentHolders is a helper method to define mock.On call
+// - ctx context.Context
+// - session db0.Session
+// - semaphoreName string
+func (_e *SyncQueries_Expecter) GetCurrentHolders(ctx interface{}, session interface{}, semaphoreName interface{}) *SyncQueries_GetCurrentHolders_Call {
+ return &SyncQueries_GetCurrentHolders_Call{Call: _e.mock.On("GetCurrentHolders", ctx, session, semaphoreName)}
+}
+
+func (_c *SyncQueries_GetCurrentHolders_Call) Run(run func(ctx context.Context, session db0.Session, semaphoreName string)) *SyncQueries_GetCurrentHolders_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 db0.Session
+ if args[1] != nil {
+ arg1 = args[1].(db0.Session)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_GetCurrentHolders_Call) Return(stateRecords []db.StateRecord, err error) *SyncQueries_GetCurrentHolders_Call {
+ _c.Call.Return(stateRecords, err)
+ return _c
+}
+
+func (_c *SyncQueries_GetCurrentHolders_Call) RunAndReturn(run func(ctx context.Context, session db0.Session, semaphoreName string) ([]db.StateRecord, error)) *SyncQueries_GetCurrentHolders_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetCurrentPending provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) GetCurrentPending(ctx context.Context, semaphoreName string) ([]db.StateRecord, error) {
+ ret := _mock.Called(ctx, semaphoreName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetCurrentPending")
+ }
+
+ var r0 []db.StateRecord
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string) ([]db.StateRecord, error)); ok {
+ return returnFunc(ctx, semaphoreName)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string) []db.StateRecord); ok {
+ r0 = returnFunc(ctx, semaphoreName)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]db.StateRecord)
+ }
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = returnFunc(ctx, semaphoreName)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_GetCurrentPending_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCurrentPending'
+type SyncQueries_GetCurrentPending_Call struct {
+ *mock.Call
+}
+
+// GetCurrentPending is a helper method to define mock.On call
+// - ctx context.Context
+// - semaphoreName string
+func (_e *SyncQueries_Expecter) GetCurrentPending(ctx interface{}, semaphoreName interface{}) *SyncQueries_GetCurrentPending_Call {
+ return &SyncQueries_GetCurrentPending_Call{Call: _e.mock.On("GetCurrentPending", ctx, semaphoreName)}
+}
+
+func (_c *SyncQueries_GetCurrentPending_Call) Run(run func(ctx context.Context, semaphoreName string)) *SyncQueries_GetCurrentPending_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_GetCurrentPending_Call) Return(stateRecords []db.StateRecord, err error) *SyncQueries_GetCurrentPending_Call {
+ _c.Call.Return(stateRecords, err)
+ return _c
+}
+
+func (_c *SyncQueries_GetCurrentPending_Call) RunAndReturn(run func(ctx context.Context, semaphoreName string) ([]db.StateRecord, error)) *SyncQueries_GetCurrentPending_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetCurrentState provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) GetCurrentState(ctx context.Context, session db0.Session, semaphoreName string, held bool) ([]db.StateRecord, error) {
+ ret := _mock.Called(ctx, session, semaphoreName, held)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetCurrentState")
+ }
+
+ var r0 []db.StateRecord
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, bool) ([]db.StateRecord, error)); ok {
+ return returnFunc(ctx, session, semaphoreName, held)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, bool) []db.StateRecord); ok {
+ r0 = returnFunc(ctx, session, semaphoreName, held)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]db.StateRecord)
+ }
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, db0.Session, string, bool) error); ok {
+ r1 = returnFunc(ctx, session, semaphoreName, held)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_GetCurrentState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCurrentState'
+type SyncQueries_GetCurrentState_Call struct {
+ *mock.Call
+}
+
+// GetCurrentState is a helper method to define mock.On call
+// - ctx context.Context
+// - session db0.Session
+// - semaphoreName string
+// - held bool
+func (_e *SyncQueries_Expecter) GetCurrentState(ctx interface{}, session interface{}, semaphoreName interface{}, held interface{}) *SyncQueries_GetCurrentState_Call {
+ return &SyncQueries_GetCurrentState_Call{Call: _e.mock.On("GetCurrentState", ctx, session, semaphoreName, held)}
+}
+
+func (_c *SyncQueries_GetCurrentState_Call) Run(run func(ctx context.Context, session db0.Session, semaphoreName string, held bool)) *SyncQueries_GetCurrentState_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 db0.Session
+ if args[1] != nil {
+ arg1 = args[1].(db0.Session)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ var arg3 bool
+ if args[3] != nil {
+ arg3 = args[3].(bool)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_GetCurrentState_Call) Return(stateRecords []db.StateRecord, err error) *SyncQueries_GetCurrentState_Call {
+ _c.Call.Return(stateRecords, err)
+ return _c
+}
+
+func (_c *SyncQueries_GetCurrentState_Call) RunAndReturn(run func(ctx context.Context, session db0.Session, semaphoreName string, held bool) ([]db.StateRecord, error)) *SyncQueries_GetCurrentState_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetExistingLocks provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) GetExistingLocks(ctx context.Context, lockName string, controllerName string) ([]db.LockRecord, error) {
+ ret := _mock.Called(ctx, lockName, controllerName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetExistingLocks")
+ }
+
+ var r0 []db.LockRecord
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string, string) ([]db.LockRecord, error)); ok {
+ return returnFunc(ctx, lockName, controllerName)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string, string) []db.LockRecord); ok {
+ r0 = returnFunc(ctx, lockName, controllerName)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]db.LockRecord)
+ }
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, string, string) error); ok {
+ r1 = returnFunc(ctx, lockName, controllerName)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_GetExistingLocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExistingLocks'
+type SyncQueries_GetExistingLocks_Call struct {
+ *mock.Call
+}
+
+// GetExistingLocks is a helper method to define mock.On call
+// - ctx context.Context
+// - lockName string
+// - controllerName string
+func (_e *SyncQueries_Expecter) GetExistingLocks(ctx interface{}, lockName interface{}, controllerName interface{}) *SyncQueries_GetExistingLocks_Call {
+ return &SyncQueries_GetExistingLocks_Call{Call: _e.mock.On("GetExistingLocks", ctx, lockName, controllerName)}
+}
+
+func (_c *SyncQueries_GetExistingLocks_Call) Run(run func(ctx context.Context, lockName string, controllerName string)) *SyncQueries_GetExistingLocks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_GetExistingLocks_Call) Return(lockRecords []db.LockRecord, err error) *SyncQueries_GetExistingLocks_Call {
+ _c.Call.Return(lockRecords, err)
+ return _c
+}
+
+func (_c *SyncQueries_GetExistingLocks_Call) RunAndReturn(run func(ctx context.Context, lockName string, controllerName string) ([]db.LockRecord, error)) *SyncQueries_GetExistingLocks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetOrderedQueue provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) GetOrderedQueue(ctx context.Context, session db0.Session, semaphoreName string, inactiveTimeout time.Duration) ([]db.StateRecord, error) {
+ ret := _mock.Called(ctx, session, semaphoreName, inactiveTimeout)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetOrderedQueue")
+ }
+
+ var r0 []db.StateRecord
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, time.Duration) ([]db.StateRecord, error)); ok {
+ return returnFunc(ctx, session, semaphoreName, inactiveTimeout)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, time.Duration) []db.StateRecord); ok {
+ r0 = returnFunc(ctx, session, semaphoreName, inactiveTimeout)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]db.StateRecord)
+ }
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, db0.Session, string, time.Duration) error); ok {
+ r1 = returnFunc(ctx, session, semaphoreName, inactiveTimeout)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_GetOrderedQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOrderedQueue'
+type SyncQueries_GetOrderedQueue_Call struct {
+ *mock.Call
+}
+
+// GetOrderedQueue is a helper method to define mock.On call
+// - ctx context.Context
+// - session db0.Session
+// - semaphoreName string
+// - inactiveTimeout time.Duration
+func (_e *SyncQueries_Expecter) GetOrderedQueue(ctx interface{}, session interface{}, semaphoreName interface{}, inactiveTimeout interface{}) *SyncQueries_GetOrderedQueue_Call {
+ return &SyncQueries_GetOrderedQueue_Call{Call: _e.mock.On("GetOrderedQueue", ctx, session, semaphoreName, inactiveTimeout)}
+}
+
+func (_c *SyncQueries_GetOrderedQueue_Call) Run(run func(ctx context.Context, session db0.Session, semaphoreName string, inactiveTimeout time.Duration)) *SyncQueries_GetOrderedQueue_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 db0.Session
+ if args[1] != nil {
+ arg1 = args[1].(db0.Session)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ var arg3 time.Duration
+ if args[3] != nil {
+ arg3 = args[3].(time.Duration)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_GetOrderedQueue_Call) Return(stateRecords []db.StateRecord, err error) *SyncQueries_GetOrderedQueue_Call {
+ _c.Call.Return(stateRecords, err)
+ return _c
+}
+
+func (_c *SyncQueries_GetOrderedQueue_Call) RunAndReturn(run func(ctx context.Context, session db0.Session, semaphoreName string, inactiveTimeout time.Duration) ([]db.StateRecord, error)) *SyncQueries_GetOrderedQueue_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetPendingInQueue provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) GetPendingInQueue(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string) ([]db.StateRecord, error) {
+ ret := _mock.Called(ctx, session, semaphoreName, holderKey, controllerName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetPendingInQueue")
+ }
+
+ var r0 []db.StateRecord
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, string, string) ([]db.StateRecord, error)); ok {
+ return returnFunc(ctx, session, semaphoreName, holderKey, controllerName)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, string, string) []db.StateRecord); ok {
+ r0 = returnFunc(ctx, session, semaphoreName, holderKey, controllerName)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]db.StateRecord)
+ }
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, db0.Session, string, string, string) error); ok {
+ r1 = returnFunc(ctx, session, semaphoreName, holderKey, controllerName)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_GetPendingInQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPendingInQueue'
+type SyncQueries_GetPendingInQueue_Call struct {
+ *mock.Call
+}
+
+// GetPendingInQueue is a helper method to define mock.On call
+// - ctx context.Context
+// - session db0.Session
+// - semaphoreName string
+// - holderKey string
+// - controllerName string
+func (_e *SyncQueries_Expecter) GetPendingInQueue(ctx interface{}, session interface{}, semaphoreName interface{}, holderKey interface{}, controllerName interface{}) *SyncQueries_GetPendingInQueue_Call {
+ return &SyncQueries_GetPendingInQueue_Call{Call: _e.mock.On("GetPendingInQueue", ctx, session, semaphoreName, holderKey, controllerName)}
+}
+
+func (_c *SyncQueries_GetPendingInQueue_Call) Run(run func(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string)) *SyncQueries_GetPendingInQueue_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 db0.Session
+ if args[1] != nil {
+ arg1 = args[1].(db0.Session)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ var arg3 string
+ if args[3] != nil {
+ arg3 = args[3].(string)
+ }
+ var arg4 string
+ if args[4] != nil {
+ arg4 = args[4].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_GetPendingInQueue_Call) Return(stateRecords []db.StateRecord, err error) *SyncQueries_GetPendingInQueue_Call {
+ _c.Call.Return(stateRecords, err)
+ return _c
+}
+
+func (_c *SyncQueries_GetPendingInQueue_Call) RunAndReturn(run func(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string) ([]db.StateRecord, error)) *SyncQueries_GetPendingInQueue_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetPendingInQueueWithSession provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) GetPendingInQueueWithSession(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string) ([]db.StateRecord, error) {
+ ret := _mock.Called(ctx, session, semaphoreName, holderKey, controllerName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetPendingInQueueWithSession")
+ }
+
+ var r0 []db.StateRecord
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, string, string) ([]db.StateRecord, error)); ok {
+ return returnFunc(ctx, session, semaphoreName, holderKey, controllerName)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, string, string) []db.StateRecord); ok {
+ r0 = returnFunc(ctx, session, semaphoreName, holderKey, controllerName)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]db.StateRecord)
+ }
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, db0.Session, string, string, string) error); ok {
+ r1 = returnFunc(ctx, session, semaphoreName, holderKey, controllerName)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_GetPendingInQueueWithSession_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPendingInQueueWithSession'
+type SyncQueries_GetPendingInQueueWithSession_Call struct {
+ *mock.Call
+}
+
+// GetPendingInQueueWithSession is a helper method to define mock.On call
+// - ctx context.Context
+// - session db0.Session
+// - semaphoreName string
+// - holderKey string
+// - controllerName string
+func (_e *SyncQueries_Expecter) GetPendingInQueueWithSession(ctx interface{}, session interface{}, semaphoreName interface{}, holderKey interface{}, controllerName interface{}) *SyncQueries_GetPendingInQueueWithSession_Call {
+ return &SyncQueries_GetPendingInQueueWithSession_Call{Call: _e.mock.On("GetPendingInQueueWithSession", ctx, session, semaphoreName, holderKey, controllerName)}
+}
+
+func (_c *SyncQueries_GetPendingInQueueWithSession_Call) Run(run func(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string)) *SyncQueries_GetPendingInQueueWithSession_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 db0.Session
+ if args[1] != nil {
+ arg1 = args[1].(db0.Session)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ var arg3 string
+ if args[3] != nil {
+ arg3 = args[3].(string)
+ }
+ var arg4 string
+ if args[4] != nil {
+ arg4 = args[4].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_GetPendingInQueueWithSession_Call) Return(stateRecords []db.StateRecord, err error) *SyncQueries_GetPendingInQueueWithSession_Call {
+ _c.Call.Return(stateRecords, err)
+ return _c
+}
+
+func (_c *SyncQueries_GetPendingInQueueWithSession_Call) RunAndReturn(run func(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string) ([]db.StateRecord, error)) *SyncQueries_GetPendingInQueueWithSession_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetSemaphoreLimit provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) GetSemaphoreLimit(ctx context.Context, dbKey string) (*db.LimitRecord, error) {
+ ret := _mock.Called(ctx, dbKey)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetSemaphoreLimit")
+ }
+
+ var r0 *db.LimitRecord
+ var r1 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string) (*db.LimitRecord, error)); ok {
+ return returnFunc(ctx, dbKey)
+ }
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string) *db.LimitRecord); ok {
+ r0 = returnFunc(ctx, dbKey)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*db.LimitRecord)
+ }
+ }
+ if returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = returnFunc(ctx, dbKey)
+ } else {
+ r1 = ret.Error(1)
+ }
+ return r0, r1
+}
+
+// SyncQueries_GetSemaphoreLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSemaphoreLimit'
+type SyncQueries_GetSemaphoreLimit_Call struct {
+ *mock.Call
+}
+
+// GetSemaphoreLimit is a helper method to define mock.On call
+// - ctx context.Context
+// - dbKey string
+func (_e *SyncQueries_Expecter) GetSemaphoreLimit(ctx interface{}, dbKey interface{}) *SyncQueries_GetSemaphoreLimit_Call {
+ return &SyncQueries_GetSemaphoreLimit_Call{Call: _e.mock.On("GetSemaphoreLimit", ctx, dbKey)}
+}
+
+func (_c *SyncQueries_GetSemaphoreLimit_Call) Run(run func(ctx context.Context, dbKey string)) *SyncQueries_GetSemaphoreLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_GetSemaphoreLimit_Call) Return(limitRecord *db.LimitRecord, err error) *SyncQueries_GetSemaphoreLimit_Call {
+ _c.Call.Return(limitRecord, err)
+ return _c
+}
+
+func (_c *SyncQueries_GetSemaphoreLimit_Call) RunAndReturn(run func(ctx context.Context, dbKey string) (*db.LimitRecord, error)) *SyncQueries_GetSemaphoreLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// InsertControllerHealth provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) InsertControllerHealth(ctx context.Context, record *db.ControllerHealthRecord) error {
+ ret := _mock.Called(ctx, record)
+
+ if len(ret) == 0 {
+ panic("no return value specified for InsertControllerHealth")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, *db.ControllerHealthRecord) error); ok {
+ r0 = returnFunc(ctx, record)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_InsertControllerHealth_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertControllerHealth'
+type SyncQueries_InsertControllerHealth_Call struct {
+ *mock.Call
+}
+
+// InsertControllerHealth is a helper method to define mock.On call
+// - ctx context.Context
+// - record *db.ControllerHealthRecord
+func (_e *SyncQueries_Expecter) InsertControllerHealth(ctx interface{}, record interface{}) *SyncQueries_InsertControllerHealth_Call {
+ return &SyncQueries_InsertControllerHealth_Call{Call: _e.mock.On("InsertControllerHealth", ctx, record)}
+}
+
+func (_c *SyncQueries_InsertControllerHealth_Call) Run(run func(ctx context.Context, record *db.ControllerHealthRecord)) *SyncQueries_InsertControllerHealth_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 *db.ControllerHealthRecord
+ if args[1] != nil {
+ arg1 = args[1].(*db.ControllerHealthRecord)
+ }
+ run(
+ arg0,
+ arg1,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_InsertControllerHealth_Call) Return(err error) *SyncQueries_InsertControllerHealth_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_InsertControllerHealth_Call) RunAndReturn(run func(ctx context.Context, record *db.ControllerHealthRecord) error) *SyncQueries_InsertControllerHealth_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// InsertHeldState provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) InsertHeldState(ctx context.Context, session db0.Session, record *db.StateRecord) error {
+ ret := _mock.Called(ctx, session, record)
+
+ if len(ret) == 0 {
+ panic("no return value specified for InsertHeldState")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, *db.StateRecord) error); ok {
+ r0 = returnFunc(ctx, session, record)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_InsertHeldState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertHeldState'
+type SyncQueries_InsertHeldState_Call struct {
+ *mock.Call
+}
+
+// InsertHeldState is a helper method to define mock.On call
+// - ctx context.Context
+// - session db0.Session
+// - record *db.StateRecord
+func (_e *SyncQueries_Expecter) InsertHeldState(ctx interface{}, session interface{}, record interface{}) *SyncQueries_InsertHeldState_Call {
+ return &SyncQueries_InsertHeldState_Call{Call: _e.mock.On("InsertHeldState", ctx, session, record)}
+}
+
+func (_c *SyncQueries_InsertHeldState_Call) Run(run func(ctx context.Context, session db0.Session, record *db.StateRecord)) *SyncQueries_InsertHeldState_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 db0.Session
+ if args[1] != nil {
+ arg1 = args[1].(db0.Session)
+ }
+ var arg2 *db.StateRecord
+ if args[2] != nil {
+ arg2 = args[2].(*db.StateRecord)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_InsertHeldState_Call) Return(err error) *SyncQueries_InsertHeldState_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_InsertHeldState_Call) RunAndReturn(run func(ctx context.Context, session db0.Session, record *db.StateRecord) error) *SyncQueries_InsertHeldState_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// InsertHeldStateWithSession provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) InsertHeldStateWithSession(ctx context.Context, session db0.Session, record *db.StateRecord) error {
+ ret := _mock.Called(ctx, session, record)
+
+ if len(ret) == 0 {
+ panic("no return value specified for InsertHeldStateWithSession")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, *db.StateRecord) error); ok {
+ r0 = returnFunc(ctx, session, record)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_InsertHeldStateWithSession_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertHeldStateWithSession'
+type SyncQueries_InsertHeldStateWithSession_Call struct {
+ *mock.Call
+}
+
+// InsertHeldStateWithSession is a helper method to define mock.On call
+// - ctx context.Context
+// - session db0.Session
+// - record *db.StateRecord
+func (_e *SyncQueries_Expecter) InsertHeldStateWithSession(ctx interface{}, session interface{}, record interface{}) *SyncQueries_InsertHeldStateWithSession_Call {
+ return &SyncQueries_InsertHeldStateWithSession_Call{Call: _e.mock.On("InsertHeldStateWithSession", ctx, session, record)}
+}
+
+func (_c *SyncQueries_InsertHeldStateWithSession_Call) Run(run func(ctx context.Context, session db0.Session, record *db.StateRecord)) *SyncQueries_InsertHeldStateWithSession_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 db0.Session
+ if args[1] != nil {
+ arg1 = args[1].(db0.Session)
+ }
+ var arg2 *db.StateRecord
+ if args[2] != nil {
+ arg2 = args[2].(*db.StateRecord)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_InsertHeldStateWithSession_Call) Return(err error) *SyncQueries_InsertHeldStateWithSession_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_InsertHeldStateWithSession_Call) RunAndReturn(run func(ctx context.Context, session db0.Session, record *db.StateRecord) error) *SyncQueries_InsertHeldStateWithSession_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// InsertLock provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) InsertLock(ctx context.Context, record *db.LockRecord) error {
+ ret := _mock.Called(ctx, record)
+
+ if len(ret) == 0 {
+ panic("no return value specified for InsertLock")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, *db.LockRecord) error); ok {
+ r0 = returnFunc(ctx, record)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_InsertLock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InsertLock'
+type SyncQueries_InsertLock_Call struct {
+ *mock.Call
+}
+
+// InsertLock is a helper method to define mock.On call
+// - ctx context.Context
+// - record *db.LockRecord
+func (_e *SyncQueries_Expecter) InsertLock(ctx interface{}, record interface{}) *SyncQueries_InsertLock_Call {
+ return &SyncQueries_InsertLock_Call{Call: _e.mock.On("InsertLock", ctx, record)}
+}
+
+func (_c *SyncQueries_InsertLock_Call) Run(run func(ctx context.Context, record *db.LockRecord)) *SyncQueries_InsertLock_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 *db.LockRecord
+ if args[1] != nil {
+ arg1 = args[1].(*db.LockRecord)
+ }
+ run(
+ arg0,
+ arg1,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_InsertLock_Call) Return(err error) *SyncQueries_InsertLock_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_InsertLock_Call) RunAndReturn(run func(ctx context.Context, record *db.LockRecord) error) *SyncQueries_InsertLock_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// ReleaseHeld provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) ReleaseHeld(ctx context.Context, semaphoreName string, key string, controllerName string) error {
+ ret := _mock.Called(ctx, semaphoreName, key, controllerName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ReleaseHeld")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string) error); ok {
+ r0 = returnFunc(ctx, semaphoreName, key, controllerName)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_ReleaseHeld_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReleaseHeld'
+type SyncQueries_ReleaseHeld_Call struct {
+ *mock.Call
+}
+
+// ReleaseHeld is a helper method to define mock.On call
+// - ctx context.Context
+// - semaphoreName string
+// - key string
+// - controllerName string
+func (_e *SyncQueries_Expecter) ReleaseHeld(ctx interface{}, semaphoreName interface{}, key interface{}, controllerName interface{}) *SyncQueries_ReleaseHeld_Call {
+ return &SyncQueries_ReleaseHeld_Call{Call: _e.mock.On("ReleaseHeld", ctx, semaphoreName, key, controllerName)}
+}
+
+func (_c *SyncQueries_ReleaseHeld_Call) Run(run func(ctx context.Context, semaphoreName string, key string, controllerName string)) *SyncQueries_ReleaseHeld_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ var arg3 string
+ if args[3] != nil {
+ arg3 = args[3].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_ReleaseHeld_Call) Return(err error) *SyncQueries_ReleaseHeld_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_ReleaseHeld_Call) RunAndReturn(run func(ctx context.Context, semaphoreName string, key string, controllerName string) error) *SyncQueries_ReleaseHeld_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RemoveFromQueue provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) RemoveFromQueue(ctx context.Context, semaphoreName string, holderKey string) error {
+ ret := _mock.Called(ctx, semaphoreName, holderKey)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RemoveFromQueue")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string, string) error); ok {
+ r0 = returnFunc(ctx, semaphoreName, holderKey)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_RemoveFromQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveFromQueue'
+type SyncQueries_RemoveFromQueue_Call struct {
+ *mock.Call
+}
+
+// RemoveFromQueue is a helper method to define mock.On call
+// - ctx context.Context
+// - semaphoreName string
+// - holderKey string
+func (_e *SyncQueries_Expecter) RemoveFromQueue(ctx interface{}, semaphoreName interface{}, holderKey interface{}) *SyncQueries_RemoveFromQueue_Call {
+ return &SyncQueries_RemoveFromQueue_Call{Call: _e.mock.On("RemoveFromQueue", ctx, semaphoreName, holderKey)}
+}
+
+func (_c *SyncQueries_RemoveFromQueue_Call) Run(run func(ctx context.Context, semaphoreName string, holderKey string)) *SyncQueries_RemoveFromQueue_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_RemoveFromQueue_Call) Return(err error) *SyncQueries_RemoveFromQueue_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_RemoveFromQueue_Call) RunAndReturn(run func(ctx context.Context, semaphoreName string, holderKey string) error) *SyncQueries_RemoveFromQueue_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateControllerTimestamp provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) UpdateControllerTimestamp(ctx context.Context, controllerName string, timestamp time.Time) error {
+ ret := _mock.Called(ctx, controllerName, timestamp)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateControllerTimestamp")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string, time.Time) error); ok {
+ r0 = returnFunc(ctx, controllerName, timestamp)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_UpdateControllerTimestamp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateControllerTimestamp'
+type SyncQueries_UpdateControllerTimestamp_Call struct {
+ *mock.Call
+}
+
+// UpdateControllerTimestamp is a helper method to define mock.On call
+// - ctx context.Context
+// - controllerName string
+// - timestamp time.Time
+func (_e *SyncQueries_Expecter) UpdateControllerTimestamp(ctx interface{}, controllerName interface{}, timestamp interface{}) *SyncQueries_UpdateControllerTimestamp_Call {
+ return &SyncQueries_UpdateControllerTimestamp_Call{Call: _e.mock.On("UpdateControllerTimestamp", ctx, controllerName, timestamp)}
+}
+
+func (_c *SyncQueries_UpdateControllerTimestamp_Call) Run(run func(ctx context.Context, controllerName string, timestamp time.Time)) *SyncQueries_UpdateControllerTimestamp_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ var arg2 time.Time
+ if args[2] != nil {
+ arg2 = args[2].(time.Time)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_UpdateControllerTimestamp_Call) Return(err error) *SyncQueries_UpdateControllerTimestamp_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_UpdateControllerTimestamp_Call) RunAndReturn(run func(ctx context.Context, controllerName string, timestamp time.Time) error) *SyncQueries_UpdateControllerTimestamp_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateSemaphoreLimit provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) UpdateSemaphoreLimit(ctx context.Context, name string, sizeLimit int) error {
+ ret := _mock.Called(ctx, name, sizeLimit)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateSemaphoreLimit")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, string, int) error); ok {
+ r0 = returnFunc(ctx, name, sizeLimit)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_UpdateSemaphoreLimit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateSemaphoreLimit'
+type SyncQueries_UpdateSemaphoreLimit_Call struct {
+ *mock.Call
+}
+
+// UpdateSemaphoreLimit is a helper method to define mock.On call
+// - ctx context.Context
+// - name string
+// - sizeLimit int
+func (_e *SyncQueries_Expecter) UpdateSemaphoreLimit(ctx interface{}, name interface{}, sizeLimit interface{}) *SyncQueries_UpdateSemaphoreLimit_Call {
+ return &SyncQueries_UpdateSemaphoreLimit_Call{Call: _e.mock.On("UpdateSemaphoreLimit", ctx, name, sizeLimit)}
+}
+
+func (_c *SyncQueries_UpdateSemaphoreLimit_Call) Run(run func(ctx context.Context, name string, sizeLimit int)) *SyncQueries_UpdateSemaphoreLimit_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 string
+ if args[1] != nil {
+ arg1 = args[1].(string)
+ }
+ var arg2 int
+ if args[2] != nil {
+ arg2 = args[2].(int)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_UpdateSemaphoreLimit_Call) Return(err error) *SyncQueries_UpdateSemaphoreLimit_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_UpdateSemaphoreLimit_Call) RunAndReturn(run func(ctx context.Context, name string, sizeLimit int) error) *SyncQueries_UpdateSemaphoreLimit_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateStateToHeld provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) UpdateStateToHeld(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string) error {
+ ret := _mock.Called(ctx, session, semaphoreName, holderKey, controllerName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateStateToHeld")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, string, string) error); ok {
+ r0 = returnFunc(ctx, session, semaphoreName, holderKey, controllerName)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_UpdateStateToHeld_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStateToHeld'
+type SyncQueries_UpdateStateToHeld_Call struct {
+ *mock.Call
+}
+
+// UpdateStateToHeld is a helper method to define mock.On call
+// - ctx context.Context
+// - session db0.Session
+// - semaphoreName string
+// - holderKey string
+// - controllerName string
+func (_e *SyncQueries_Expecter) UpdateStateToHeld(ctx interface{}, session interface{}, semaphoreName interface{}, holderKey interface{}, controllerName interface{}) *SyncQueries_UpdateStateToHeld_Call {
+ return &SyncQueries_UpdateStateToHeld_Call{Call: _e.mock.On("UpdateStateToHeld", ctx, session, semaphoreName, holderKey, controllerName)}
+}
+
+func (_c *SyncQueries_UpdateStateToHeld_Call) Run(run func(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string)) *SyncQueries_UpdateStateToHeld_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 db0.Session
+ if args[1] != nil {
+ arg1 = args[1].(db0.Session)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ var arg3 string
+ if args[3] != nil {
+ arg3 = args[3].(string)
+ }
+ var arg4 string
+ if args[4] != nil {
+ arg4 = args[4].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_UpdateStateToHeld_Call) Return(err error) *SyncQueries_UpdateStateToHeld_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_UpdateStateToHeld_Call) RunAndReturn(run func(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string) error) *SyncQueries_UpdateStateToHeld_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateStateToHeldWithSession provides a mock function for the type SyncQueries
+func (_mock *SyncQueries) UpdateStateToHeldWithSession(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string) error {
+ ret := _mock.Called(ctx, session, semaphoreName, holderKey, controllerName)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateStateToHeldWithSession")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, db0.Session, string, string, string) error); ok {
+ r0 = returnFunc(ctx, session, semaphoreName, holderKey, controllerName)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// SyncQueries_UpdateStateToHeldWithSession_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStateToHeldWithSession'
+type SyncQueries_UpdateStateToHeldWithSession_Call struct {
+ *mock.Call
+}
+
+// UpdateStateToHeldWithSession is a helper method to define mock.On call
+// - ctx context.Context
+// - session db0.Session
+// - semaphoreName string
+// - holderKey string
+// - controllerName string
+func (_e *SyncQueries_Expecter) UpdateStateToHeldWithSession(ctx interface{}, session interface{}, semaphoreName interface{}, holderKey interface{}, controllerName interface{}) *SyncQueries_UpdateStateToHeldWithSession_Call {
+ return &SyncQueries_UpdateStateToHeldWithSession_Call{Call: _e.mock.On("UpdateStateToHeldWithSession", ctx, session, semaphoreName, holderKey, controllerName)}
+}
+
+func (_c *SyncQueries_UpdateStateToHeldWithSession_Call) Run(run func(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string)) *SyncQueries_UpdateStateToHeldWithSession_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 db0.Session
+ if args[1] != nil {
+ arg1 = args[1].(db0.Session)
+ }
+ var arg2 string
+ if args[2] != nil {
+ arg2 = args[2].(string)
+ }
+ var arg3 string
+ if args[3] != nil {
+ arg3 = args[3].(string)
+ }
+ var arg4 string
+ if args[4] != nil {
+ arg4 = args[4].(string)
+ }
+ run(
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ )
+ })
+ return _c
+}
+
+func (_c *SyncQueries_UpdateStateToHeldWithSession_Call) Return(err error) *SyncQueries_UpdateStateToHeldWithSession_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *SyncQueries_UpdateStateToHeldWithSession_Call) RunAndReturn(run func(ctx context.Context, session db0.Session, semaphoreName string, holderKey string, controllerName string) error) *SyncQueries_UpdateStateToHeldWithSession_Call {
+ _c.Call.Return(run)
+ return _c
+}
diff --git a/util/sync/db/queries.go b/util/sync/db/queries.go
new file mode 100644
index 000000000000..058b3dfd8492
--- /dev/null
+++ b/util/sync/db/queries.go
@@ -0,0 +1,342 @@
+package db
+
+import (
+ "context"
+ "time"
+
+ "github.com/upper/db/v4"
+)
+
// Record types for database operations.

// LimitRecord is one row of the semaphore-limit table: the configured
// concurrency limit for a named semaphore.
type LimitRecord struct {
	Name      string `db:"name"`
	SizeLimit int    `db:"sizelimit"`
}

// StateRecord is one row of the semaphore-state table: a workflow that is
// either holding (Held=true) or queued for (Held=false) a semaphore.
type StateRecord struct {
	Name       string    `db:"name"`        // semaphore name identifier
	Key        string    `db:"workflowkey"` // workflow key holding or waiting for the lock — presumably "namespace/name"; TODO confirm exact format against callers
	Controller string    `db:"controller"`  // controller where the workflow is running
	Held       bool      `db:"held"`        // true once acquired, false while still queued
	Priority   int32     `db:"priority"`    // higher number = higher priority in queue
	Time       time.Time `db:"time"`        // timestamp of creation or last update
}

// ControllerHealthRecord is one row of the controller-heartbeat table, used
// to decide which controllers are still alive.
type ControllerHealthRecord struct {
	Controller string    `db:"controller"` // controller where the workflow is running
	Time       time.Time `db:"time"`       // timestamp of creation or last update
}

// LockRecord is one row of the lock table: a mutex-style lock owned by a
// controller.
type LockRecord struct {
	Name       string    `db:"name"`       // semaphore name identifier
	Controller string    `db:"controller"` // controller where the workflow is running
	Time       time.Time `db:"time"`       // timestamp of creation
}
+
// Field name constants. These mirror the `db:"..."` struct tags on the record
// types above and are used to build WHERE/SELECT clauses; keep them in sync
// with the tags.
const (
	LimitNameField = "name"
	LimitSizeField = "sizelimit"

	StateNameField       = "name"
	StateKeyField        = "workflowkey"
	StateControllerField = "controller"
	StateHeldField       = "held"
	StatePriorityField   = "priority"
	StateTimeField       = "time"

	ControllerNameField = "controller"
	ControllerTimeField = "time"

	LockNameField       = "name"
	LockControllerField = "controller"
)
+
// SyncQueries is the data-access layer for database-backed semaphore
// synchronization: limit configuration, holder/queue state, coordination
// locks, and controller heartbeats. Methods that take a db.Session run
// against that caller-supplied (e.g. transactional) session; the others use
// the implementation's own session.
type SyncQueries interface {
	// Semaphore limit configuration.
	CreateSemaphoreLimit(ctx context.Context, name string, sizeLimit int) error
	UpdateSemaphoreLimit(ctx context.Context, name string, sizeLimit int) error
	DeleteSemaphoreLimit(ctx context.Context, name string) error
	GetSemaphoreLimit(ctx context.Context, dbKey string) (*LimitRecord, error)

	// Holder and pending-queue state.
	GetCurrentState(ctx context.Context, session db.Session, semaphoreName string, held bool) ([]StateRecord, error)
	GetCurrentHolders(ctx context.Context, session db.Session, semaphoreName string) ([]StateRecord, error)
	GetCurrentPending(ctx context.Context, semaphoreName string) ([]StateRecord, error)
	GetOrderedQueue(ctx context.Context, session db.Session, semaphoreName string, inactiveTimeout time.Duration) ([]StateRecord, error)
	AddToQueue(ctx context.Context, record *StateRecord) error
	RemoveFromQueue(ctx context.Context, semaphoreName, holderKey string) error
	CheckQueueExists(ctx context.Context, semaphoreName, holderKey, controllerName string) ([]StateRecord, error)
	UpdateStateToHeld(ctx context.Context, session db.Session, semaphoreName, holderKey, controllerName string) error
	InsertHeldState(ctx context.Context, session db.Session, record *StateRecord) error
	GetPendingInQueue(ctx context.Context, session db.Session, semaphoreName, holderKey, controllerName string) ([]StateRecord, error)
	ReleaseHeld(ctx context.Context, semaphoreName, key, controllerName string) error

	// Coordination locks.
	GetExistingLocks(ctx context.Context, lockName, controllerName string) ([]LockRecord, error)
	InsertLock(ctx context.Context, record *LockRecord) error
	DeleteLock(ctx context.Context, lockName string) error
	ExpireInactiveLocks(ctx context.Context, inactiveTimeout time.Duration) (int64, error)

	// Controller heartbeats.
	InsertControllerHealth(ctx context.Context, record *ControllerHealthRecord) error
	UpdateControllerTimestamp(ctx context.Context, controllerName string, timestamp time.Time) error

	// NOTE(review): the *WithSession methods below have signatures identical to
	// GetPendingInQueue, UpdateStateToHeld and InsertHeldState above, which
	// already take a session — consider consolidating the duplicates.
	GetPendingInQueueWithSession(ctx context.Context, session db.Session, semaphoreName, holderKey, controllerName string) ([]StateRecord, error)
	UpdateStateToHeldWithSession(ctx context.Context, session db.Session, semaphoreName, holderKey, controllerName string) error
	InsertHeldStateWithSession(ctx context.Context, session db.Session, record *StateRecord) error
}
+
// Compile-time check that syncQueries implements SyncQueries.
var _ SyncQueries = &syncQueries{}

// syncQueries holds all SQL query operations for the sync package.
// config supplies the table names; session is the default database session
// used by methods that do not receive an explicit one.
type syncQueries struct {
	config  dbConfig
	session db.Session
}
+
+// NewSyncQueries creates a new syncQueries instance
+func NewSyncQueries(session db.Session, config dbConfig) *syncQueries {
+ return &syncQueries{
+ config: config,
+ session: session,
+ }
+}
+
+// Limit operations
+func (q *syncQueries) CreateSemaphoreLimit(ctx context.Context, name string, sizeLimit int) error {
+ _, err := q.session.Collection(q.config.LimitTable).Insert(&LimitRecord{
+ Name: name,
+ SizeLimit: sizeLimit,
+ })
+ return err
+}
+
+func (q *syncQueries) UpdateSemaphoreLimit(ctx context.Context, name string, sizeLimit int) error {
+ resp, err := q.session.SQL().Update(q.config.LimitTable).
+ Set(LimitSizeField, sizeLimit).
+ Where(db.Cond{LimitNameField: name}).
+ Exec()
+
+ if err != nil {
+ return err
+ }
+
+ affectedRows, err := resp.RowsAffected()
+ if err != nil {
+ return err
+ }
+
+ if affectedRows == 0 {
+ return db.ErrNoMoreRows
+ }
+
+ return nil
+}
+
+func (q *syncQueries) DeleteSemaphoreLimit(ctx context.Context, name string) error {
+ _, err := q.session.SQL().DeleteFrom(q.config.LimitTable).
+ Where(db.Cond{LimitNameField: name}).
+ Exec()
+ return err
+}
+
+func (q *syncQueries) GetSemaphoreLimit(ctx context.Context, name string) (*LimitRecord, error) {
+ limit := &LimitRecord{}
+ err := q.session.SQL().
+ Select(LimitSizeField).
+ From(q.config.LimitTable).
+ Where(db.Cond{LimitNameField: name}).
+ One(limit)
+ return limit, err
+}
+
+// State operations
+func (q *syncQueries) GetCurrentState(ctx context.Context, session db.Session, semaphoreName string, held bool) ([]StateRecord, error) {
+ var states []StateRecord
+ err := session.SQL().
+ Select(StateKeyField).
+ From(q.config.StateTable).
+ Where(db.Cond{StateHeldField: held}).
+ And(db.Cond{StateNameField: semaphoreName}).
+ All(&states)
+ return states, err
+}
+
// GetCurrentHolders returns the state rows currently holding the semaphore
// (held=true), using the caller-supplied session.
func (q *syncQueries) GetCurrentHolders(ctx context.Context, session db.Session, semaphoreName string) ([]StateRecord, error) {
	return q.GetCurrentState(ctx, session, semaphoreName, true)
}
+
// GetCurrentPending returns the state rows queued for the semaphore
// (held=false), using the receiver's own session.
func (q *syncQueries) GetCurrentPending(ctx context.Context, semaphoreName string) ([]StateRecord, error) {
	return q.GetCurrentState(ctx, q.session, semaphoreName, false)
}
+
+func (q *syncQueries) GetOrderedQueue(ctx context.Context, session db.Session, semaphoreName string, inactiveTimeout time.Duration) ([]StateRecord, error) {
+ since := time.Now().Add(-inactiveTimeout)
+ var queue []StateRecord
+ subquery := session.SQL().
+ Select(ControllerNameField).
+ From(q.config.ControllerTable).
+ And(db.Cond{ControllerTimeField + " >": since})
+
+ err := session.SQL().
+ Select(StateKeyField, StateControllerField).
+ From(q.config.StateTable).
+ Where(db.Cond{StateNameField: semaphoreName}).
+ And(db.Cond{StateHeldField: false}).
+ And(db.Cond{
+ "controller IN": subquery,
+ }).
+ OrderBy(StatePriorityField+" DESC", StateTimeField+" ASC").
+ All(&queue)
+
+ return queue, err
+}
+
+func (q *syncQueries) AddToQueue(ctx context.Context, record *StateRecord) error {
+ _, err := q.session.Collection(q.config.StateTable).Insert(record)
+ return err
+}
+
+func (q *syncQueries) RemoveFromQueue(ctx context.Context, semaphoreName, holderKey string) error {
+ _, err := q.session.SQL().
+ DeleteFrom(q.config.StateTable).
+ Where(db.Cond{StateNameField: semaphoreName}).
+ And(db.Cond{StateKeyField: holderKey}).
+ And(db.Cond{StateHeldField: false}).
+ Exec()
+ return err
+}
+
+func (q *syncQueries) CheckQueueExists(ctx context.Context, semaphoreName, holderKey, controllerName string) ([]StateRecord, error) {
+ var states []StateRecord
+ err := q.session.SQL().
+ Select(StateKeyField).
+ From(q.config.StateTable).
+ Where(db.Cond{StateNameField: semaphoreName}).
+ And(db.Cond{StateKeyField: holderKey}).
+ And(db.Cond{StateControllerField: controllerName}).
+ All(&states)
+ return states, err
+}
+
+func (q *syncQueries) UpdateStateToHeld(ctx context.Context, session db.Session, semaphoreName, holderKey, controllerName string) error {
+ _, err := session.SQL().Update(q.config.StateTable).
+ Set(StateHeldField, true).
+ Where(db.Cond{StateNameField: semaphoreName}).
+ And(db.Cond{StateKeyField: holderKey}).
+ And(db.Cond{StateControllerField: controllerName}).
+ And(db.Cond{StateHeldField: false}).
+ Exec()
+ return err
+}
+
+func (q *syncQueries) InsertHeldState(ctx context.Context, session db.Session, record *StateRecord) error {
+ _, err := session.Collection(q.config.StateTable).Insert(record)
+ return err
+}
+
+func (q *syncQueries) GetPendingInQueue(ctx context.Context, session db.Session, semaphoreName, holderKey, controllerName string) ([]StateRecord, error) {
+ var pending []StateRecord
+ err := session.SQL().
+ Select(StateKeyField).
+ From(q.config.StateTable).
+ Where(db.Cond{StateNameField: semaphoreName}).
+ And(db.Cond{StateKeyField: holderKey}).
+ And(db.Cond{StateControllerField: controllerName}).
+ And(db.Cond{StateHeldField: false}).
+ All(&pending)
+ return pending, err
+}
+
+// ReleaseHeld deletes the held state row for key on the given semaphore,
+// scoped to controllerName so one controller cannot release another
+// controller's hold. Deleting a row that does not exist is not an error.
+func (q *syncQueries) ReleaseHeld(ctx context.Context, semaphoreName, key, controllerName string) error {
+	// Bind ctx so the delete honours caller cancellation; previously unused.
+	_, err := q.session.WithContext(ctx).SQL().
+		DeleteFrom(q.config.StateTable).
+		Where(db.Cond{StateHeldField: true}).
+		And(db.Cond{StateNameField: semaphoreName}).
+		And(db.Cond{StateKeyField: key}).
+		And(db.Cond{StateControllerField: controllerName}).
+		Exec()
+	return err
+}
+
+// Lock operations
+// GetExistingLocks returns the lock rows held by controllerName for lockName.
+// Callers use a non-empty result to detect a lock that survived a restart.
+func (q *syncQueries) GetExistingLocks(ctx context.Context, lockName, controllerName string) ([]LockRecord, error) {
+	var existingLocks []LockRecord
+	// Bind ctx so the query honours caller cancellation; previously unused.
+	err := q.session.WithContext(ctx).SQL().
+		Select(LockNameField).
+		From(q.config.LockTable).
+		Where(db.Cond{LockNameField: lockName}).
+		And(db.Cond{LockControllerField: controllerName}).
+		All(&existingLocks)
+	return existingLocks, err
+}
+
+// InsertLock inserts record into the lock table, claiming the named lock for
+// the record's controller.
+func (q *syncQueries) InsertLock(ctx context.Context, record *LockRecord) error {
+	// Bind ctx so the insert honours caller cancellation; previously unused.
+	_, err := q.session.WithContext(ctx).Collection(q.config.LockTable).Insert(record)
+	return err
+}
+
+// DeleteLock removes all lock rows with the given name.
+// NOTE(review): unlike ReleaseHeld this does not filter by controller — it
+// matches the query this code replaced, but confirm cross-controller deletion
+// is intended.
+func (q *syncQueries) DeleteLock(ctx context.Context, lockName string) error {
+	// Bind ctx so the delete honours caller cancellation; previously unused.
+	_, err := q.session.WithContext(ctx).SQL().
+		DeleteFrom(q.config.LockTable).
+		Where(db.Cond{LockNameField: lockName}).
+		Exec()
+	return err
+}
+
+// ExpireInactiveLocks deletes lock rows owned by controllers whose last
+// heartbeat is at or before now minus inactiveTimeout, and returns the number
+// of rows removed.
+func (q *syncQueries) ExpireInactiveLocks(ctx context.Context, inactiveTimeout time.Duration) (int64, error) {
+	since := time.Now().Add(-inactiveTimeout)
+	// Controllers considered inactive: last heartbeat at or before the cutoff.
+	// Start the condition chain with Where (rather than And) for clarity; the
+	// generated SQL is the same.
+	subquery := q.session.WithContext(ctx).SQL().
+		Select(ControllerNameField).
+		From(q.config.ControllerTable).
+		Where(db.Cond{ControllerTimeField + " <=": since})
+
+	result, err := q.session.WithContext(ctx).SQL().DeleteFrom(q.config.LockTable).
+		Where(db.Cond{LockControllerField + " IN": subquery}).
+		Exec()
+	if err != nil {
+		return 0, err
+	}
+	return result.RowsAffected()
+}
+
+// Controller operations
+// InsertControllerHealth inserts record into the controller health table,
+// registering the controller's first heartbeat.
+func (q *syncQueries) InsertControllerHealth(ctx context.Context, record *ControllerHealthRecord) error {
+	// Bind ctx so the insert honours caller cancellation; previously unused.
+	_, err := q.session.WithContext(ctx).Collection(q.config.ControllerTable).Insert(record)
+	return err
+}
+
+// UpdateControllerTimestamp sets controllerName's heartbeat time to timestamp,
+// keeping the controller considered alive by ExpireInactiveLocks.
+func (q *syncQueries) UpdateControllerTimestamp(ctx context.Context, controllerName string, timestamp time.Time) error {
+	// Bind ctx so the update honours caller cancellation; previously unused.
+	_, err := q.session.WithContext(ctx).SQL().Update(q.config.ControllerTable).
+		Set(ControllerTimeField, timestamp).
+		Where(db.Cond{ControllerNameField: controllerName}).
+		Exec()
+	return err
+}
+
+// Session-scoped variants used inside acquire/release transactions
+// GetPendingInQueueWithSession returns the pending (held=false) state rows for
+// the given semaphore/holder/controller triple, running on the caller-supplied
+// session so it can participate in an acquire transaction.
+// NOTE(review): ctx is currently unused here; binding it via
+// session.WithContext(ctx) looks possible — TODO confirm it preserves
+// transaction semantics for tx-backed sessions before changing.
+func (q *syncQueries) GetPendingInQueueWithSession(ctx context.Context, session db.Session, semaphoreName, holderKey, controllerName string) ([]StateRecord, error) {
+	var pending []StateRecord
+	err := session.SQL().
+		Select(StateKeyField).
+		From(q.config.StateTable).
+		Where(db.Cond{StateNameField: semaphoreName}).
+		And(db.Cond{StateKeyField: holderKey}).
+		And(db.Cond{StateControllerField: controllerName}).
+		And(db.Cond{StateHeldField: false}).
+		All(&pending)
+	return pending, err
+}
+
+// UpdateStateToHeldWithSession promotes the pending (held=false) entry for the
+// given semaphore/holder/controller to held=true, running on the
+// caller-supplied session so it can participate in an acquire transaction.
+// The held=false guard makes the promotion idempotent: an already-held row is
+// not matched.
+// NOTE(review): ctx is currently unused here — see
+// GetPendingInQueueWithSession.
+func (q *syncQueries) UpdateStateToHeldWithSession(ctx context.Context, session db.Session, semaphoreName, holderKey, controllerName string) error {
+	_, err := session.SQL().Update(q.config.StateTable).
+		Set(StateHeldField, true).
+		Where(db.Cond{StateNameField: semaphoreName}).
+		And(db.Cond{StateKeyField: holderKey}).
+		And(db.Cond{StateControllerField: controllerName}).
+		And(db.Cond{StateHeldField: false}).
+		Exec()
+	return err
+}
+
+// InsertHeldStateWithSession inserts record into the state table on the
+// caller-supplied session (the caller sets Held on the record), so the write
+// can participate in an acquire transaction.
+// NOTE(review): ctx is currently unused here — see
+// GetPendingInQueueWithSession.
+func (q *syncQueries) InsertHeldStateWithSession(ctx context.Context, session db.Session, record *StateRecord) error {
+	_, err := session.Collection(q.config.StateTable).Insert(record)
+	return err
+}
diff --git a/workflow/sync/database_helper_test.go b/workflow/sync/database_helper_test.go
index a0e18386db03..2b64db4521eb 100644
--- a/workflow/sync/database_helper_test.go
+++ b/workflow/sync/database_helper_test.go
@@ -14,6 +14,7 @@ import (
"github.com/argoproj/argo-workflows/v3/config"
"github.com/argoproj/argo-workflows/v3/util/sqldb"
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
)
const (
@@ -23,7 +24,7 @@ const (
)
// createTestDBSession creates a test database session
-func createTestDBSession(ctx context.Context, t *testing.T, dbType sqldb.DBType) (dbInfo, func(), config.SyncConfig, error) {
+func createTestDBSession(ctx context.Context, t *testing.T, dbType sqldb.DBType) (syncdb.DBInfo, func(), config.SyncConfig, error) {
t.Helper()
var cfg config.SyncConfig
@@ -40,28 +41,28 @@ func createTestDBSession(ctx context.Context, t *testing.T, dbType sqldb.DBType)
t.Fatalf("failed to start container: %s", err)
}
- info := dbInfo{
- config: dbConfigFromConfig(&cfg),
- session: dbSessionFromConfigWithCreds(&cfg, testDBUser, testDBPassword),
+ info := syncdb.DBInfo{
+ Config: syncdb.DBConfigFromConfig(&cfg),
+ Session: syncdb.DBSessionFromConfigWithCreds(&cfg, testDBUser, testDBPassword),
}
- require.NotNil(t, info.session, "failed to create database session")
+ require.NotNil(t, info.Session, "failed to create database session")
deferfn := func() {
- info.session.Close()
+ info.Session.Close()
termContainerFn()
}
- info.migrate(ctx)
- require.NotNil(t, info.session, "failed to migrate database")
+ info.Migrate(ctx)
+ require.NotNil(t, info.Session, "failed to migrate database")
// Mark this controller as alive immediately
- _, err = info.session.Collection(info.config.controllerTable).
- Insert(&controllerHealthRecord{
- Controller: info.config.controllerName,
+ _, err = info.Session.Collection(info.Config.ControllerTable).
+ Insert(&syncdb.ControllerHealthRecord{
+ Controller: info.Config.ControllerName,
Time: time.Now(),
})
if err != nil {
- info.session.Close()
- info.session = nil
+ info.Session.Close()
+ info.Session = nil
return info, deferfn, cfg, err
}
diff --git a/workflow/sync/database_mutex.go b/workflow/sync/database_mutex.go
index 2647b0d1ba6e..1d1b5769b038 100644
--- a/workflow/sync/database_mutex.go
+++ b/workflow/sync/database_mutex.go
@@ -1,6 +1,10 @@
package sync
-func newDatabaseMutex(name string, dbKey string, nextWorkflow NextWorkflow, info dbInfo) *databaseSemaphore {
+import (
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
+)
+
+func newDatabaseMutex(name string, dbKey string, nextWorkflow NextWorkflow, info syncdb.DBInfo) *databaseSemaphore {
logger := syncLogger{
name: name,
lockType: lockTypeMutex,
@@ -12,6 +16,7 @@ func newDatabaseMutex(name string, dbKey string, nextWorkflow NextWorkflow, info
nextWorkflow: nextWorkflow,
logger: logger.get,
info: info,
+ queries: syncdb.NewSyncQueries(info.Session, info.Config),
isMutex: true,
}
}
diff --git a/workflow/sync/database_mutex_test.go b/workflow/sync/database_mutex_test.go
index db6eab5b9287..0a08270137b2 100644
--- a/workflow/sync/database_mutex_test.go
+++ b/workflow/sync/database_mutex_test.go
@@ -23,7 +23,7 @@ func createTestDatabaseMutex(ctx context.Context, t *testing.T, name, namespace
// Create a mutex (which is a semaphore with limit=1)
mutex := newDatabaseMutex(name, dbKey, nextWorkflow, info)
require.NotNil(t, mutex)
- tx := &transaction{db: &info.session}
+ tx := &transaction{db: &info.Session}
return mutex, tx, deferfunc
}
diff --git a/workflow/sync/database_semaphore.go b/workflow/sync/database_semaphore.go
index a645eb6115d4..72b78ba907bc 100644
--- a/workflow/sync/database_semaphore.go
+++ b/workflow/sync/database_semaphore.go
@@ -9,6 +9,7 @@ import (
"github.com/upper/db/v4"
"github.com/argoproj/argo-workflows/v3/util/logging"
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
)
type databaseSemaphore struct {
@@ -17,57 +18,14 @@ type databaseSemaphore struct {
shortDBKey string
nextWorkflow NextWorkflow
logger loggerFn
- info dbInfo
+ info syncdb.DBInfo
+ queries syncdb.SyncQueries
isMutex bool
}
-type limitRecord struct {
- Name string `db:"name"`
- SizeLimit int `db:"sizelimit"`
-}
-
-type stateRecord struct {
- Name string `db:"name"` // semaphore name identifier
- Key string `db:"workflowkey"` // workflow key holding or waiting for the lock of the form /
- Controller string `db:"controller"` // controller where the workflow is running
- Held bool `db:"held"`
- Priority int32 `db:"priority"` // higher number = higher priority in queue
- Time time.Time `db:"time"` // timestamp of creation or last update
-}
-
-type controllerHealthRecord struct {
- Controller string `db:"controller"` // controller where the workflow is running
- Time time.Time `db:"time"` // timestamp of creation or last update
-}
-
-type lockRecord struct {
- Name string `db:"name"` // semaphore name identifier
- Controller string `db:"controller"` // controller where the workflow is running
- Time time.Time `db:"time"` // timestamp of creation
-}
-
-const (
- limitNameField = "name"
- limitSizeField = "sizelimit"
-
- stateNameField = "name"
- stateKeyField = "workflowkey"
- stateControllerField = "controller"
- stateHeldField = "held"
- statePriorityField = "priority"
- stateTimeField = "time"
-
- controllerNameField = "controller"
- controllerTimeField = "time"
-
- lockNameField = "name"
- lockControllerField = "controller"
- lockTimeField = "time"
-)
-
var _ semaphore = &databaseSemaphore{}
-func newDatabaseSemaphore(ctx context.Context, name string, dbKey string, nextWorkflow NextWorkflow, info dbInfo, syncLimitCacheTTL time.Duration) (*databaseSemaphore, error) {
+func newDatabaseSemaphore(ctx context.Context, name string, dbKey string, nextWorkflow NextWorkflow, info syncdb.DBInfo, syncLimitCacheTTL time.Duration) (*databaseSemaphore, error) {
logger := syncLogger{
name: name,
lockType: lockTypeSemaphore,
@@ -79,6 +37,7 @@ func newDatabaseSemaphore(ctx context.Context, name string, dbKey string, nextWo
nextWorkflow: nextWorkflow,
logger: logger.get,
info: info,
+ queries: syncdb.NewSyncQueries(info.Session, info.Config),
isMutex: false,
}
sem.limitGetter = newCachedLimit(sem.getLimitFromDB, syncLimitCacheTTL)
@@ -104,12 +63,7 @@ func (s *databaseSemaphore) getName() string {
func (s *databaseSemaphore) getLimitFromDB(ctx context.Context, _ string) (int, error) {
logger := s.logger(ctx)
// Update the limit from the database
- limit := &limitRecord{}
- err := s.info.session.SQL().
- Select(limitSizeField).
- From(s.info.config.limitTable).
- Where(db.Cond{limitNameField: s.shortDBKey}).
- One(limit)
+ limit, err := s.queries.GetSemaphoreLimit(ctx, s.shortDBKey)
if err != nil {
logger.WithField("key", s.shortDBKey).WithError(err).Error(ctx, "Failed to get limit")
return 0, err
@@ -136,13 +90,7 @@ func (s *databaseSemaphore) getLimit(ctx context.Context) int {
func (s *databaseSemaphore) currentState(ctx context.Context, session db.Session, held bool) ([]string, error) {
logger := s.logger(ctx)
- var states []stateRecord
- err := session.SQL().
- Select(stateKeyField).
- From(s.info.config.stateTable).
- Where(db.Cond{stateHeldField: held}).
- And(db.Cond{stateNameField: s.longDBKey()}).
- All(&states)
+ states, err := s.queries.GetCurrentState(ctx, session, s.longDBKey(), held)
if err != nil {
logger.WithField("held", held).WithError(err).Error(ctx, "Failed to get current state")
return nil, err
@@ -155,11 +103,11 @@ func (s *databaseSemaphore) currentState(ctx context.Context, session db.Session
}
func (s *databaseSemaphore) getCurrentPending(ctx context.Context) ([]string, error) {
- return s.currentState(ctx, s.info.session, false)
+ return s.currentState(ctx, s.info.Session, false)
}
func (s *databaseSemaphore) getCurrentHolders(ctx context.Context) ([]string, error) {
- return s.currentHoldersSession(ctx, s.info.session)
+ return s.currentHoldersSession(ctx, s.info.Session)
}
func (s *databaseSemaphore) currentHoldersSession(ctx context.Context, session db.Session) ([]string, error) {
@@ -169,13 +117,7 @@ func (s *databaseSemaphore) currentHoldersSession(ctx context.Context, session d
func (s *databaseSemaphore) lock(ctx context.Context) bool {
logger := s.logger(ctx)
// Check if lock already exists, in case we crashed and restarted
- var existingLocks []lockRecord
- err := s.info.session.SQL().
- Select(lockNameField).
- From(s.info.config.lockTable).
- Where(db.Cond{lockNameField: s.longDBKey()}).
- And(db.Cond{lockControllerField: s.info.config.controllerName}).
- All(&existingLocks)
+ existingLocks, err := s.queries.GetExistingLocks(ctx, s.longDBKey(), s.info.Config.ControllerName)
if err == nil && len(existingLocks) > 0 {
// Lock already exists
@@ -183,21 +125,18 @@ func (s *databaseSemaphore) lock(ctx context.Context) bool {
return true
}
- record := &lockRecord{
+ record := &syncdb.LockRecord{
Name: s.longDBKey(),
- Controller: s.info.config.controllerName,
+ Controller: s.info.Config.ControllerName,
Time: time.Now(),
}
- _, err = s.info.session.Collection(s.info.config.lockTable).Insert(record)
+ err = s.queries.InsertLock(ctx, record)
return err == nil
}
-func (s *databaseSemaphore) unlock(_ context.Context) {
+func (s *databaseSemaphore) unlock(ctx context.Context) {
for {
- _, err := s.info.session.SQL().
- DeleteFrom(s.info.config.lockTable).
- Where(db.Cond{lockNameField: s.longDBKey()}).
- Exec()
+ err := s.queries.DeleteLock(ctx, s.longDBKey())
if err == nil {
break
}
@@ -207,13 +146,7 @@ func (s *databaseSemaphore) unlock(_ context.Context) {
func (s *databaseSemaphore) release(ctx context.Context, key string) bool {
logger := s.logger(ctx)
- _, err := s.info.session.SQL().
- DeleteFrom(s.info.config.stateTable).
- Where(db.Cond{stateHeldField: true}).
- And(db.Cond{stateNameField: s.longDBKey()}).
- And(db.Cond{stateKeyField: key}).
- And(db.Cond{stateControllerField: s.info.config.controllerName}).
- Exec()
+ err := s.queries.ReleaseHeld(ctx, s.longDBKey(), key, s.info.Config.ControllerName)
switch err {
case nil:
@@ -226,26 +159,9 @@ func (s *databaseSemaphore) release(ctx context.Context, key string) bool {
}
}
-func (s *databaseSemaphore) queueOrdered(ctx context.Context, session db.Session) ([]stateRecord, error) {
+func (s *databaseSemaphore) queueOrdered(ctx context.Context, session db.Session) ([]syncdb.StateRecord, error) {
logger := s.logger(ctx)
- since := time.Now().Add(-s.info.config.inactiveControllerTimeout)
- var queue []stateRecord
- subquery := session.SQL().
- Select(controllerNameField).
- From(s.info.config.controllerTable).
- And(db.Cond{controllerTimeField + " >": since})
-
- err := session.SQL().
- Select(stateKeyField, stateControllerField).
- From(s.info.config.stateTable).
- Where(db.Cond{stateNameField: s.longDBKey()}).
- And(db.Cond{stateHeldField: false}).
- And(db.Cond{
- "controller IN": subquery,
- }).
- OrderBy(statePriorityField+" DESC", stateTimeField+" ASC").
- All(&queue)
-
+ queue, err := s.queries.GetOrderedQueue(ctx, session, s.longDBKey(), s.info.Config.InactiveControllerTimeout)
if err != nil {
logger.WithError(err).Error(ctx, "Failed to get ordered queue for semaphore notification")
return nil, err
@@ -266,7 +182,7 @@ func (s *databaseSemaphore) notifyWaiters(ctx context.Context) {
}
holdCount := len(holders)
- pending, err := s.queueOrdered(ctx, s.info.session)
+ pending, err := s.queueOrdered(ctx, s.info.Session)
if err != nil {
return
}
@@ -278,7 +194,7 @@ func (s *databaseSemaphore) notifyWaiters(ctx context.Context) {
}).Debug(ctx, "Notifying waiters for semaphore")
for idx := 0; idx < triggerCount; idx++ {
item := pending[idx]
- if item.Controller != s.info.config.controllerName {
+ if item.Controller != s.info.Config.ControllerName {
continue
}
key := workflowKey(item.Key)
@@ -288,42 +204,29 @@ func (s *databaseSemaphore) notifyWaiters(ctx context.Context) {
}
// addToQueue adds the holderkey into priority queue that maintains the priority order to acquire the lock.
-func (s *databaseSemaphore) addToQueue(_ context.Context, holderKey string, priority int32, creationTime time.Time) error {
+func (s *databaseSemaphore) addToQueue(ctx context.Context, holderKey string, priority int32, creationTime time.Time) error {
// Doesn't need a transaction, as no-one else should be inserting exactly this record ever
- var states []stateRecord
- err := s.info.session.SQL().
- Select(stateKeyField).
- From(s.info.config.stateTable).
- Where(db.Cond{stateNameField: s.longDBKey()}).
- And(db.Cond{stateKeyField: holderKey}).
- And(db.Cond{stateControllerField: s.info.config.controllerName}).
- All(&states)
+ states, err := s.queries.CheckQueueExists(ctx, s.longDBKey(), holderKey, s.info.Config.ControllerName)
if err != nil {
return err
}
if len(states) > 0 {
return nil
}
- record := &stateRecord{
+ record := &syncdb.StateRecord{
Name: s.longDBKey(),
Key: holderKey,
- Controller: s.info.config.controllerName,
+ Controller: s.info.Config.ControllerName,
Held: false,
Priority: priority,
Time: creationTime,
}
- _, err = s.info.session.Collection(s.info.config.stateTable).Insert(record)
+ err = s.queries.AddToQueue(ctx, record)
return err
}
-func (s *databaseSemaphore) removeFromQueue(_ context.Context, holderKey string) error {
- _, err := s.info.session.SQL().
- DeleteFrom(s.info.config.stateTable).
- Where(db.Cond{stateNameField: s.longDBKey()}).
- And(db.Cond{stateKeyField: holderKey}).
- And(db.Cond{stateHeldField: false}).
- Exec()
-
+func (s *databaseSemaphore) removeFromQueue(ctx context.Context, holderKey string) error {
+ err := s.queries.RemoveFromQueue(ctx, s.longDBKey(), holderKey)
return err
}
@@ -392,14 +295,14 @@ func (s *databaseSemaphore) checkAcquire(ctx context.Context, holderKey string,
}).Info(ctx, "CheckAcquire - empty queue")
return false, false, ""
}
- if queue[0].Controller != s.info.config.controllerName {
+ if queue[0].Controller != s.info.Config.ControllerName {
logger.WithFields(logging.Fields{
"key": holderKey,
"result": false,
"already_held": false,
"message": waitingMsg,
"queue_controller": queue[0].Controller,
- "current_controller": s.info.config.controllerName,
+ "current_controller": s.info.Config.ControllerName,
}).Info(ctx, "CheckAcquire - different controller")
return false, false, waitingMsg
}
@@ -434,39 +337,25 @@ func (s *databaseSemaphore) acquire(ctx context.Context, holderKey string, tx *t
return false
}
if len(existing) < limit {
- var pending []stateRecord
- err := (*tx.db).SQL().
- Select(stateKeyField).
- From(s.info.config.stateTable).
- Where(db.Cond{stateNameField: s.longDBKey()}).
- And(db.Cond{stateKeyField: holderKey}).
- And(db.Cond{stateControllerField: s.info.config.controllerName}).
- And(db.Cond{stateHeldField: false}).
- All(&pending)
+ pending, err := s.queries.GetPendingInQueueWithSession(ctx, *tx.db, s.longDBKey(), holderKey, s.info.Config.ControllerName)
if err != nil {
logger.WithField("key", holderKey).WithError(err).Error(ctx, "Failed to acquire lock")
return false
}
if len(pending) > 0 {
- _, err := (*tx.db).SQL().Update(s.info.config.stateTable).
- Set(stateHeldField, true).
- Where(db.Cond{stateNameField: s.longDBKey()}).
- And(db.Cond{stateKeyField: holderKey}).
- And(db.Cond{stateControllerField: s.info.config.controllerName}).
- And(db.Cond{stateHeldField: false}).
- Exec()
+ err := s.queries.UpdateStateToHeldWithSession(ctx, *tx.db, s.longDBKey(), holderKey, s.info.Config.ControllerName)
if err != nil {
logger.WithField("key", holderKey).WithError(err).Error(ctx, "Failed to acquire lock")
return false
}
} else {
- record := &stateRecord{
+ record := &syncdb.StateRecord{
Name: s.longDBKey(),
Key: holderKey,
- Controller: s.info.config.controllerName,
+ Controller: s.info.Config.ControllerName,
Held: true,
}
- _, err := (*tx.db).Collection(s.info.config.stateTable).Insert(record)
+ err := s.queries.InsertHeldStateWithSession(ctx, *tx.db, record)
if err != nil {
logger.WithField("key", holderKey).WithError(err).Error(ctx, "Failed to acquire lock")
return false
@@ -525,19 +414,10 @@ func (s *databaseSemaphore) tryAcquire(ctx context.Context, holderKey string, tx
func (s *databaseSemaphore) expireLocks(ctx context.Context) {
logger := s.logger(ctx)
- since := time.Now().Add(-s.info.config.inactiveControllerTimeout)
- subquery := s.info.session.SQL().
- Select(controllerNameField).
- From(s.info.config.controllerTable).
- And(db.Cond{controllerTimeField + " <=": since})
-
- // Delete locks from inactive controllers
- result, err := s.info.session.SQL().DeleteFrom(s.info.config.lockTable).
- Where(db.Cond{lockControllerField + " IN": subquery}).
- Exec()
+ rowsAffected, err := s.queries.ExpireInactiveLocks(ctx, s.info.Config.InactiveControllerTimeout)
if err != nil {
logger.WithError(err).Error(ctx, "Failed to expire locks")
- } else if rowsAffected, err := result.RowsAffected(); err == nil && rowsAffected > 0 {
+ } else if rowsAffected > 0 {
logger.WithField("rowsAffected", rowsAffected).Info(ctx, "Expired locks")
}
}
diff --git a/workflow/sync/database_semaphore_test.go b/workflow/sync/database_semaphore_test.go
index deaa34360591..1c78312320a4 100644
--- a/workflow/sync/database_semaphore_test.go
+++ b/workflow/sync/database_semaphore_test.go
@@ -11,6 +11,7 @@ import (
"github.com/argoproj/argo-workflows/v3/util/logging"
"github.com/argoproj/argo-workflows/v3/util/sqldb"
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
)
var testDBTypes []sqldb.DBType
@@ -39,10 +40,10 @@ func TestInactiveControllerDBSemaphore(t *testing.T) {
defer deferfunc()
// Update the controller heartbeat to be older than the inactive controller timeout
- staleTime := time.Now().Add(-info.config.inactiveControllerTimeout * 2)
- _, err := info.session.SQL().Update(info.config.controllerTable).
+ staleTime := time.Now().Add(-info.Config.InactiveControllerTimeout * 2)
+ _, err := info.Session.SQL().Update(info.Config.ControllerTable).
Set("time", staleTime).
- Where(db.Cond{"controller": info.config.controllerName}).
+ Where(db.Cond{"controller": info.Config.ControllerName}).
Exec()
require.NoError(t, err)
@@ -52,14 +53,14 @@ func TestInactiveControllerDBSemaphore(t *testing.T) {
require.NoError(t, s.addToQueue(ctx, "foo/wf-02", 0, now.Add(time.Second)))
// Try to acquire - this should fail because the controller is considered inactive
- tx := &transaction{db: &info.session}
+ tx := &transaction{db: &info.Session}
acquired, _ := s.tryAcquire(ctx, "foo/wf-01", tx)
assert.False(t, acquired, "Semaphore should not be acquired when controller is marked as inactive")
// Now update the controller heartbeat to be current
- _, err = info.session.SQL().Update(info.config.controllerTable).
+ _, err = info.Session.SQL().Update(info.Config.ControllerTable).
Set("time", time.Now()).
- Where(db.Cond{"controller": info.config.controllerName}).
+ Where(db.Cond{"controller": info.Config.ControllerName}).
Exec()
require.NoError(t, err)
@@ -82,23 +83,23 @@ func TestOtherControllerDBSemaphore(t *testing.T) {
// Add an entry for another controller
otherController := "otherController"
- controllerRecord := &controllerHealthRecord{
+ controllerRecord := &syncdb.ControllerHealthRecord{
Controller: otherController,
Time: time.Now(),
}
- _, err := info.session.Collection(info.config.controllerTable).
+ _, err := info.Session.Collection(info.Config.ControllerTable).
Insert(controllerRecord)
require.NoError(t, err)
// Add an item to the queue from the other controller
- semaphoreRecord := &stateRecord{
+ semaphoreRecord := &syncdb.StateRecord{
Name: s.longDBKey(),
Key: "foo/other-wf-01",
Controller: otherController,
Held: false,
Time: time.Now(),
}
- _, err = info.session.Collection(info.config.stateTable).
+ _, err = info.Session.Collection(info.Config.StateTable).
Insert(semaphoreRecord)
require.NoError(t, err)
@@ -107,13 +108,13 @@ func TestOtherControllerDBSemaphore(t *testing.T) {
require.NoError(t, s.addToQueue(ctx, "foo/our-wf-01", 0, now.Add(time.Second)))
// Try to acquire - this should fail because the other controller's item is first in line
- tx := &transaction{db: &info.session}
+ tx := &transaction{db: &info.Session}
acquired, _ := s.tryAcquire(ctx, "foo/our-wf-01", tx)
assert.False(t, acquired, "Semaphore should not be acquired when another controller's item is first in queue")
// Now mark the other controller as inactive by setting its timestamp to be old
- staleTime := time.Now().Add(-info.config.inactiveControllerTimeout * 2)
- _, err = info.session.SQL().Update(info.config.controllerTable).
+ staleTime := time.Now().Add(-info.Config.InactiveControllerTimeout * 2)
+ _, err = info.Session.SQL().Update(info.Config.ControllerTable).
Set("time", staleTime).
Where(db.Cond{"controller": otherController}).
Exec()
@@ -144,23 +145,23 @@ func TestDifferentSemaphoreDBSemaphore(t *testing.T) {
// Add an entry for another controller
otherController := "otherController"
- controllerRecord := &controllerHealthRecord{
+ controllerRecord := &syncdb.ControllerHealthRecord{
Controller: otherController,
Time: time.Now(),
}
- _, err := info.session.Collection(info.config.controllerTable).
+ _, err := info.Session.Collection(info.Config.ControllerTable).
Insert(controllerRecord)
require.NoError(t, err)
// Add an item to the queue from the other cluster with a DIFFERENT semaphore name
- semaphoreRecord := &stateRecord{
+ semaphoreRecord := &syncdb.StateRecord{
Name: "sem/different/semaphore",
Key: "foo/other-wf-01",
Controller: otherController,
Held: false,
Time: time.Now(),
}
- _, err = info.session.Collection(info.config.stateTable).
+ _, err = info.Session.Collection(info.Config.StateTable).
Insert(semaphoreRecord)
require.NoError(t, err)
@@ -169,7 +170,7 @@ func TestDifferentSemaphoreDBSemaphore(t *testing.T) {
require.NoError(t, s.addToQueue(ctx, "foo/our-wf-01", 0, now.Add(time.Second)))
// Try to acquire - this should succeed because the other cluster's item is for a different semaphore
- tx := &transaction{db: &info.session}
+ tx := &transaction{db: &info.Session}
acquired, _ := s.tryAcquire(ctx, "foo/our-wf-01", tx)
assert.True(t, acquired, "Semaphore should be acquired when another cluster's item is for a different semaphore")
@@ -203,7 +204,7 @@ func TestMutexAndSemaphoreWithSameName(t *testing.T) {
now := time.Now()
// Mutex workflow 1
- tx := &transaction{db: &info.session}
+ tx := &transaction{db: &info.Session}
require.NoError(t, mutex.addToQueue(ctx, "foo/wf-mutex-1", 0, now))
mutexAcquired1, _ := mutex.tryAcquire(ctx, "foo/wf-mutex-1", tx)
assert.True(t, mutexAcquired1, "Mutex should be acquired by first workflow")
@@ -257,10 +258,10 @@ func TestMutexAndSemaphoreWithSameName(t *testing.T) {
assert.False(t, mutexAcquired3, "Mutex should still be held by another workflow")
// Verify by checking the database directly
- var allHolders []stateRecord
- err := info.session.SQL().
+ var allHolders []syncdb.StateRecord
+ err := info.Session.SQL().
Select("*").
- From(info.config.stateTable).
+ From(info.Config.StateTable).
Where(db.Cond{"held": true}).
All(&allHolders)
require.NoError(t, err)
@@ -314,10 +315,10 @@ func TestSyncLimitCacheDB(t *testing.T) {
assert.Equal(t, 5, limit, "Limit should still be 5")
// Update the semaphore limit in the database
- _, err := info.session.SQL().
- Update(info.config.limitTable).
- Set(limitSizeField, 10).
- Where(db.Cond{limitNameField: s.shortDBKey}).
+ _, err := info.Session.SQL().
+ Update(info.Config.LimitTable).
+ Set(syncdb.LimitSizeField, 10).
+ Where(db.Cond{syncdb.LimitNameField: s.shortDBKey}).
Exec()
require.NoError(t, err)
@@ -355,10 +356,10 @@ func TestSyncLimitCacheDB(t *testing.T) {
mockNow = mockNow.Add(1 * time.Millisecond)
// Update the semaphore limit in the database
- _, err := info.session.SQL().
- Update(info.config.limitTable).
- Set(limitSizeField, 7).
- Where(db.Cond{limitNameField: s.shortDBKey}).
+ _, err := info.Session.SQL().
+ Update(info.Config.LimitTable).
+ Set(syncdb.LimitSizeField, 7).
+ Where(db.Cond{syncdb.LimitNameField: s.shortDBKey}).
Exec()
require.NoError(t, err)
diff --git a/workflow/sync/semaphore_test.go b/workflow/sync/semaphore_test.go
index 4dfb2ead54a1..edf5005d1def 100644
--- a/workflow/sync/semaphore_test.go
+++ b/workflow/sync/semaphore_test.go
@@ -13,6 +13,7 @@ import (
"github.com/argoproj/argo-workflows/v3/util/logging"
"github.com/argoproj/argo-workflows/v3/util/sqldb"
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
)
// semaphoreFactory is a function that creates a semaphore for testing
@@ -27,13 +28,13 @@ func createTestInternalSemaphore(ctx context.Context, t *testing.T, name, namesp
}
// createTestDatabaseSemaphore creates a database-backed semaphore for testing, used elsewhere
-func createTestDatabaseSemaphore(ctx context.Context, t *testing.T, name, namespace string, limit int, cacheTTL time.Duration, nextWorkflow NextWorkflow, dbType sqldb.DBType) (*databaseSemaphore, dbInfo, func()) {
+func createTestDatabaseSemaphore(ctx context.Context, t *testing.T, name, namespace string, limit int, cacheTTL time.Duration, nextWorkflow NextWorkflow, dbType sqldb.DBType) (*databaseSemaphore, syncdb.DBInfo, func()) {
t.Helper()
info, deferfunc, _, err := createTestDBSession(ctx, t, dbType)
require.NoError(t, err)
dbKey := fmt.Sprintf("%s/%s", namespace, name)
- _, err = info.session.SQL().Exec("INSERT INTO sync_limit (name, sizelimit) VALUES (?, ?)", dbKey, limit)
+ _, err = info.Session.SQL().Exec("INSERT INTO sync_limit (name, sizelimit) VALUES (?, ?)", dbKey, limit)
require.NoError(t, err)
s, err := newDatabaseSemaphore(ctx, name, dbKey, nextWorkflow, info, cacheTTL)
@@ -47,14 +48,14 @@ func createTestDatabaseSemaphore(ctx context.Context, t *testing.T, name, namesp
func createTestDatabaseSemaphorePostgres(ctx context.Context, t *testing.T, name, namespace string, limit int, nextWorkflow NextWorkflow) (semaphore, db.Session, func()) {
t.Helper()
s, info, deferfunc := createTestDatabaseSemaphore(ctx, t, name, namespace, limit, 0, nextWorkflow, sqldb.Postgres)
- return s, info.session, deferfunc
+ return s, info.Session, deferfunc
}
// createTestDatabaseSemaphoreMySQL creates a database-backed semaphore that conforms to the factory
func createTestDatabaseSemaphoreMySQL(ctx context.Context, t *testing.T, name, namespace string, limit int, nextWorkflow NextWorkflow) (semaphore, db.Session, func()) {
t.Helper()
s, info, deferfunc := createTestDatabaseSemaphore(ctx, t, name, namespace, limit, 0, nextWorkflow, sqldb.MySQL)
- return s, info.session, deferfunc
+ return s, info.Session, deferfunc
}
// semaphoreFactories defines the available semaphore implementations for testing
diff --git a/workflow/sync/sync_manager.go b/workflow/sync/sync_manager.go
index 2b032d186608..d79ab17e07eb 100644
--- a/workflow/sync/sync_manager.go
+++ b/workflow/sync/sync_manager.go
@@ -16,6 +16,7 @@ import (
"github.com/argoproj/argo-workflows/v3/config"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
)
type (
@@ -31,7 +32,8 @@ type Manager struct {
getSyncLimit GetSyncLimit
syncLimitCacheTTL time.Duration
isWFDeleted IsWorkflowDeleted
- dbInfo dbInfo
+ dbInfo syncdb.DBInfo
+ queries syncdb.SyncQueries
log logging.Logger
}
@@ -43,7 +45,7 @@ const (
)
func NewLockManager(ctx context.Context, kubectlConfig kubernetes.Interface, namespace string, config *config.SyncConfig, getSyncLimit GetSyncLimit, nextWorkflow NextWorkflow, isWFDeleted IsWorkflowDeleted) *Manager {
- return createLockManager(ctx, dbSessionFromConfig(ctx, kubectlConfig, namespace, config), config, getSyncLimit, nextWorkflow, isWFDeleted)
+ return createLockManager(ctx, syncdb.DBSessionFromConfig(ctx, kubectlConfig, namespace, config), config, getSyncLimit, nextWorkflow, isWFDeleted)
}
func createLockManager(ctx context.Context, dbSession db.Session, config *config.SyncConfig, getSyncLimit GetSyncLimit, nextWorkflow NextWorkflow, isWFDeleted IsWorkflowDeleted) *Manager {
@@ -54,6 +56,10 @@ func createLockManager(ctx context.Context, dbSession db.Session, config *config
ctx, log := logging.RequireLoggerFromContext(ctx).WithField("component", "lock_manager").InContext(ctx)
log.WithField("syncLimitCacheTTL", syncLimitCacheTTL).Info(ctx, "Sync manager ttl")
+ dbInfo := syncdb.DBInfo{
+ Session: dbSession,
+ Config: syncdb.DBConfigFromConfig(config),
+ }
sm := &Manager{
syncLockMap: make(map[string]semaphore),
lock: &sync.RWMutex{},
@@ -61,16 +67,14 @@ func createLockManager(ctx context.Context, dbSession db.Session, config *config
getSyncLimit: getSyncLimit,
syncLimitCacheTTL: syncLimitCacheTTL,
isWFDeleted: isWFDeleted,
- dbInfo: dbInfo{
- session: dbSession,
- config: dbConfigFromConfig(config),
- },
- log: log,
+ dbInfo: dbInfo,
+ queries: syncdb.NewSyncQueries(dbSession, dbInfo.Config),
+ log: log,
}
- log.WithField("dbConfigured", sm.dbInfo.session != nil).Info(ctx, "Sync manager initialized")
- sm.dbInfo.migrate(ctx)
+ log.WithField("dbConfigured", sm.dbInfo.Session != nil).Info(ctx, "Sync manager initialized")
+ sm.dbInfo.Migrate(ctx)
- if sm.dbInfo.session != nil {
+ if sm.dbInfo.Session != nil {
sm.backgroundNotifier(ctx, config.PollSeconds)
sm.dbControllerHeartbeat(ctx, config.HeartbeatSeconds)
}
@@ -228,7 +232,7 @@ func (sm *Manager) Initialize(ctx context.Context, wfs []wfv1.Workflow) {
continue
}
key := getUpgradedKey(&wf, holders, level)
- tx := &transaction{db: &sm.dbInfo.session}
+ tx := &transaction{db: &sm.dbInfo.Session}
if semaphore != nil && semaphore.acquire(ctx, key, tx) {
sm.log.WithFields(logging.Fields{"key": key, "semaphore": holding.Semaphore}).Info(ctx, "Lock acquired")
}
@@ -253,7 +257,7 @@ func (sm *Manager) Initialize(ctx context.Context, wfs []wfv1.Workflow) {
continue
}
key := getUpgradedKey(&wf, holding.Holder, level)
- tx := &transaction{db: &sm.dbInfo.session}
+ tx := &transaction{db: &sm.dbInfo.Session}
mutex.acquire(ctx, key, tx)
}
sm.syncLockMap[holding.Mutex] = mutex
@@ -299,7 +303,7 @@ func (sm *Manager) TryAcquire(ctx context.Context, wf *wfv1.Workflow, nodeName s
if err != nil {
return false, false, "", failedLockName, fmt.Errorf("couldn't decode locks for session: %w", err)
}
- if needDB && sm.dbInfo.session == nil {
+ if needDB && sm.dbInfo.Session == nil {
return false, false, "", failedLockName, fmt.Errorf("synchronization database session is not available")
}
if needDB {
@@ -309,7 +313,7 @@ func (sm *Manager) TryAcquire(ctx context.Context, wf *wfv1.Workflow, nodeName s
var failedLockName string
var lastErr error
for retryCounter := range 5 {
- err := sm.dbInfo.session.TxContext(ctx, func(sess db.Session) error {
+ err := sm.dbInfo.Session.TxContext(ctx, func(sess db.Session) error {
sm.log.WithFields(logging.Fields{
"holderKey": holderKey,
"attempt": retryCounter + 1,
@@ -611,7 +615,7 @@ func (sm *Manager) initializeSemaphore(ctx context.Context, semaphoreName string
case lockKindConfigMap:
return newInternalSemaphore(ctx, semaphoreName, sm.nextWorkflow, sm.getSyncLimit, sm.syncLimitCacheTTL)
case lockKindDatabase:
- if sm.dbInfo.session == nil {
+ if sm.dbInfo.Session == nil {
return nil, fmt.Errorf("database session is not available for semaphore %s", semaphoreName)
}
return newDatabaseSemaphore(ctx, semaphoreName, lock.dbKey(), sm.nextWorkflow, sm.dbInfo, sm.syncLimitCacheTTL)
@@ -629,7 +633,7 @@ func (sm *Manager) initializeMutex(ctx context.Context, mutexName string) (semap
case lockKindMutex:
return newInternalMutex(mutexName, sm.nextWorkflow), nil
case lockKindDatabase:
- if sm.dbInfo.session == nil {
+ if sm.dbInfo.Session == nil {
return nil, fmt.Errorf("database session is not available for mutex %s", mutexName)
}
return newDatabaseMutex(mutexName, lock.dbKey(), sm.nextWorkflow, sm.dbInfo), nil
@@ -639,7 +643,7 @@ func (sm *Manager) initializeMutex(ctx context.Context, mutexName string) (semap
}
func (sm *Manager) backgroundNotifier(ctx context.Context, period *int) {
- sm.log.WithField("pollInterval", secondsToDurationWithDefault(period, defaultDBPollSeconds)).
+ sm.log.WithField("pollInterval", syncdb.SecondsToDurationWithDefault(period, syncdb.DefaultDBPollSeconds)).
Info(ctx, "Starting background notification for sync locks")
go wait.UntilWithContext(ctx, func(_ context.Context) {
sm.lock.Lock()
@@ -648,7 +652,7 @@ func (sm *Manager) backgroundNotifier(ctx context.Context, period *int) {
}
sm.lock.Unlock()
},
- secondsToDurationWithDefault(period, defaultDBPollSeconds),
+ syncdb.SecondsToDurationWithDefault(period, syncdb.DefaultDBPollSeconds),
)
}
@@ -659,23 +663,19 @@ func (sm *Manager) dbControllerHeartbeat(ctx context.Context, period *int) {
// Failure here is not critical, so we don't check errors, we may already be in the table
ll := db.LC().Level()
db.LC().SetLevel(db.LogLevelError)
- _, _ = sm.dbInfo.session.Collection(sm.dbInfo.config.controllerTable).
- Insert(&controllerHealthRecord{
- Controller: sm.dbInfo.config.controllerName,
- Time: time.Now(),
- })
+ _ = sm.queries.InsertControllerHealth(ctx, &syncdb.ControllerHealthRecord{
+ Controller: sm.dbInfo.Config.ControllerName,
+ Time: time.Now(),
+ })
db.LC().SetLevel(ll)
sm.dbControllerUpdate(ctx)
go wait.UntilWithContext(ctx, func(_ context.Context) { sm.dbControllerUpdate(ctx) },
- secondsToDurationWithDefault(period, defaultDBHeartbeatSeconds))
+ syncdb.SecondsToDurationWithDefault(period, syncdb.DefaultDBHeartbeatSeconds))
}
func (sm *Manager) dbControllerUpdate(ctx context.Context) {
- _, err := sm.dbInfo.session.SQL().Update(sm.dbInfo.config.controllerTable).
- Set(controllerTimeField, time.Now()).
- Where(db.Cond{controllerNameField: sm.dbInfo.config.controllerName}).
- Exec()
+ err := sm.queries.UpdateControllerTimestamp(ctx, sm.dbInfo.Config.ControllerName, time.Now())
if err != nil {
sm.log.WithError(err).Error(ctx, "Failed to update sync controller timestamp")
}
diff --git a/workflow/sync/sync_manager_multiple_test.go b/workflow/sync/sync_manager_multiple_test.go
index 44900008d297..508693cd6e87 100644
--- a/workflow/sync/sync_manager_multiple_test.go
+++ b/workflow/sync/sync_manager_multiple_test.go
@@ -66,18 +66,18 @@ func setupMultipleLockManagers(t *testing.T, dbType sqldb.DBType, semaphoreSize
// Set up the semaphore limit in the database
dbKey := "default/my-db-semaphore"
- _, err = info.session.SQL().Exec("INSERT INTO sync_limit (name, sizelimit) VALUES (?, ?)", dbKey, semaphoreSize)
+ _, err = info.Session.SQL().Exec("INSERT INTO sync_limit (name, sizelimit) VALUES (?, ?)", dbKey, semaphoreSize)
require.NoError(t, err)
// Create two sync managers with the same database session
- syncMgr1 := createLockManager(ctx, info.session, &cfg, func(_ context.Context, _ string) (int, error) { return 2, nil }, func(key string) {}, WorkflowExistenceFunc)
+ syncMgr1 := createLockManager(ctx, info.Session, &cfg, func(_ context.Context, _ string) (int, error) { return 2, nil }, func(key string) {}, WorkflowExistenceFunc)
require.NotNil(t, syncMgr1)
- require.NotNil(t, syncMgr1.dbInfo.session)
+ require.NotNil(t, syncMgr1.dbInfo.Session)
// Second controller
cfg.ControllerName = "test2"
- syncMgr2 := createLockManager(ctx, info.session, &cfg, func(_ context.Context, _ string) (int, error) { return 2, nil }, func(key string) {}, WorkflowExistenceFunc)
+ syncMgr2 := createLockManager(ctx, info.Session, &cfg, func(_ context.Context, _ string) (int, error) { return 2, nil }, func(key string) {}, WorkflowExistenceFunc)
require.NotNil(t, syncMgr2)
- require.NotNil(t, syncMgr2.dbInfo.session)
+ require.NotNil(t, syncMgr2.dbInfo.Session)
return ctx, deferfn2, syncMgr1, syncMgr2
}
diff --git a/workflow/sync/sync_manager_test.go b/workflow/sync/sync_manager_test.go
index c83e1c2901ab..01d5e19295d9 100644
--- a/workflow/sync/sync_manager_test.go
+++ b/workflow/sync/sync_manager_test.go
@@ -23,6 +23,7 @@ import (
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
fakewfclientset "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/fake"
"github.com/argoproj/argo-workflows/v3/util/sqldb"
+ syncdb "github.com/argoproj/argo-workflows/v3/util/sync/db"
)
const configMap = `
@@ -1628,15 +1629,15 @@ func TestBackgroundNotifierClearsExpiredLocks(t *testing.T) {
// Insert controller records - one fresh, one stale
now := time.Now()
- staleTime := now.Add(-info.config.inactiveControllerTimeout * 2) // Double the inactive timeout
+ staleTime := now.Add(-info.Config.InactiveControllerTimeout * 2) // Double the inactive timeout
- _, err = info.session.Collection(info.config.controllerTable).Insert(&controllerHealthRecord{
+ _, err = info.Session.Collection(info.Config.ControllerTable).Insert(&syncdb.ControllerHealthRecord{
Controller: activeController,
Time: now,
})
require.NoError(t, err)
- _, err = info.session.Collection(info.config.controllerTable).Insert(&controllerHealthRecord{
+ _, err = info.Session.Collection(info.Config.ControllerTable).Insert(&syncdb.ControllerHealthRecord{
Controller: inactiveController,
Time: staleTime,
})
@@ -1646,21 +1647,21 @@ func TestBackgroundNotifierClearsExpiredLocks(t *testing.T) {
lockName1 := "test-lock-active"
lockName2 := "test-lock-inactive"
- _, err = info.session.Collection(info.config.lockTable).Insert(&lockRecord{
+ _, err = info.Session.Collection(info.Config.LockTable).Insert(&syncdb.LockRecord{
Name: lockName1,
Controller: activeController,
Time: now,
})
require.NoError(t, err)
- _, err = info.session.Collection(info.config.lockTable).Insert(&lockRecord{
+ _, err = info.Session.Collection(info.Config.LockTable).Insert(&syncdb.LockRecord{
Name: lockName2,
Controller: inactiveController,
Time: now, // Time doesn't matter, controller is what matters
})
require.NoError(t, err)
- _, err = info.session.SQL().Exec("INSERT INTO sync_limit (name, sizelimit) VALUES (?, ?)", "foo/test-semaphore", 100)
+ _, err = info.Session.SQL().Exec("INSERT INTO sync_limit (name, sizelimit) VALUES (?, ?)", "foo/test-semaphore", 100)
require.NoError(t, err)
// Initialize a semaphore so it gets added to the syncLockMap
testsem, err := newDatabaseSemaphore(ctx, "test-semaphore", "foo/test-semaphore", func(key string) {}, info, 0)
@@ -1669,7 +1670,7 @@ func TestBackgroundNotifierClearsExpiredLocks(t *testing.T) {
syncLockMap["sem/test-semaphore"] = testsem
// Verify both lock records exist initially
- lockCount, err := info.session.Collection(info.config.lockTable).Count()
+ lockCount, err := info.Session.Collection(info.Config.LockTable).Count()
require.NoError(t, err)
assert.Equal(t, uint64(2), lockCount, "Should have two lock records initially")
@@ -1679,8 +1680,8 @@ func TestBackgroundNotifierClearsExpiredLocks(t *testing.T) {
}
// Check that only the active controller's lock remains
- var remainingLocks []lockRecord
- err = info.session.SQL().Select("*").From(info.config.lockTable).All(&remainingLocks)
+ var remainingLocks []syncdb.LockRecord
+ err = info.Session.SQL().Select("*").From(info.Config.LockTable).All(&remainingLocks)
require.NoError(t, err)
assert.Len(t, remainingLocks, 1, "Should have one lock record remaining")
@@ -1798,10 +1799,10 @@ func TestUnconfiguredSemaphores(t *testing.T) {
defer cleanup()
// Configure sync manager
- syncManager := createLockManager(ctx, info.session, &syncConfig, nil, func(key string) {
+ syncManager := createLockManager(ctx, info.Session, &syncConfig, nil, func(key string) {
}, WorkflowExistenceFunc)
require.NotNil(t, syncManager)
- require.NotNil(t, syncManager.dbInfo.session)
+ require.NotNil(t, syncManager.dbInfo.Session)
// Create a workflow with a database semaphore
wf := wfv1.MustUnmarshalWorkflow(wfWithDBSemaphore)