From 7e38f4475d0199fdfa99ce63ef348eb5ab23bf6c Mon Sep 17 00:00:00 2001
From: Chris Bandy
Date: Tue, 16 Sep 2025 09:38:46 -0500
Subject: [PATCH 01/43] Limit the number of allowed instance sets in v1

This allows CEL rules inside instance sets to fit within the cost budget.
Without it, Kubernetes estimates that rules may execute over the maximum
size of a request.

The cost of the affected rule changes from ~700k to ~9k.
---
 ...ator.crunchydata.com_postgresclusters.yaml |  5 +++-
 .../postgrescluster/postgres_config_test.go   | 30 +++++++++++--------
 .../v1/postgrescluster_types.go               |  8 ++---
 3 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml
index 5fe0db4d0..c863abe73 100644
--- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml
+++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml
@@ -11683,6 +11683,7 @@ spec:
                 required:
                 - dataVolumeClaimSpec
                 type: object
+              maxItems: 16
               minItems: 1
               type: array
               x-kubernetes-list-map-keys:
@@ -18412,7 +18413,9 @@ spec:
       - fieldPath: .config.parameters.log_directory
         message: all instances need an additional volume to log in "/volumes"
         rule: self.?config.parameters.log_directory.optMap(v, type(v) != string
-          || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue())).orValue(true)
+          || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue()
+          && i.volumes.additional.exists(volume, v.startsWith("/volumes/" +
+          volume.name)))).orValue(true)
     status:
       description: PostgresClusterStatus defines the observed state of PostgresCluster
       properties:
diff --git a/internal/testing/validation/postgrescluster/postgres_config_test.go b/internal/testing/validation/postgrescluster/postgres_config_test.go
index d9529a8d0..5a636ac43 100644
--- a/internal/testing/validation/postgrescluster/postgres_config_test.go
+++ b/internal/testing/validation/postgrescluster/postgres_config_test.go
@@ -226,25 +226,31 @@ func TestPostgresConfigParametersV1(t *testing.T) {
 				message: `"/pgwal/logs/postgres"`,
 			},
 
-			// Directories inside /volumes are acceptable, but every instance set needs additional volumes.
-			//
-			// TODO(validation): This could be more precise and check the directory name of each additional
-			// volume, but Kubernetes 1.33 incorrectly estimates the cost of volume.name:
-			// https://github.com/kubernetes-sigs/controller-tools/pull/1270#issuecomment-3272211184
+			// Directories inside /volumes are acceptable, but every instance set needs the correct additional volume.
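+			// The rule also checks the directory against each additional volume's name:
+			// log_directory must begin with "/volumes/<volume name>", as the cases below exercise.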
{ - name: "two instance sets and two additional volumes", - value: "/volumes/anything", + name: "two instance sets and two correct additional volumes", + value: "/volumes/yep", instances: `[ - { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: dir, claimName: a }] } }, - { name: two, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: dir, claimName: b }] } }, + { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: yep, claimName: a }] } }, + { name: two, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: yep, claimName: b }] } }, ]`, valid: true, }, + { + name: "two instance sets and one correct additional volume", + value: "/volumes/yep", + instances: `[ + { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: yep, claimName: a }] } }, + { name: two, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: diff, claimName: b }] } }, + ]`, + valid: false, + message: `all instances need an additional volume`, + }, { name: "two instance sets and one additional volume", - value: "/volumes/anything", + value: "/volumes/yep", instances: `[ - { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: dir, claimName: a }] } }, + { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: yep, claimName: a }] } }, { name: two, dataVolumeClaimSpec: ` + volume + ` }, ]`, valid: false, @@ -252,7 +258,7 @@ func TestPostgresConfigParametersV1(t *testing.T) { }, { name: "two instance sets and no additional volumes", - value: "/volumes/anything", + value: "/volumes/yep", instances: `[ { name: one, dataVolumeClaimSpec: ` + volume + ` }, { name: two, dataVolumeClaimSpec: ` + volume + ` }, diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go index 2c3b9c411..1a642e13c 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go @@ -21,11 +21,7 @@ import ( // // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need "volumes.temp" to log in "/pgtmp"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/pgtmp/logs/postgres") || self.instances.all(i, i.?volumes.temp.hasValue())).orValue(true)` // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need "walVolumeClaimSpec" to log in "/pgwal"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/pgwal/logs/postgres") || self.instances.all(i, i.?walVolumeClaimSpec.hasValue())).orValue(true)` -// -// +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need an additional volume to log in "/volumes"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue())).orValue(true)` -// -// TODO: Also check the above path against volume names: `i.?volumes.additional.hasValue() && i.volumes.additional.exists(directory.startsWith("/volumes/" + volume.name))` -// https://github.com/kubernetes-sigs/controller-tools/pull/1270#issuecomment-3272211184 +// +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need 
an additional volume to log in "/volumes"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true)`
 type PostgresClusterSpec struct {
 	// +optional
 	Metadata *v1beta1.Metadata `json:"metadata,omitempty"`
@@ -110,9 +106,11 @@ type PostgresClusterSpec struct {
 
 	// Specifies one or more sets of PostgreSQL pods that replicate data for
 	// this cluster.
+	// ---
 	// +listType=map
 	// +listMapKey=name
 	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=16
 	// +operator-sdk:csv:customresourcedefinitions:type=spec,order=2
 	InstanceSets []PostgresInstanceSetSpec `json:"instances"`
 

From f9d9c4262f5c6681f8e763650ac6c4e3ea756f07 Mon Sep 17 00:00:00 2001
From: Drew Sessler
Date: Wed, 3 Sep 2025 11:29:05 -0700
Subject: [PATCH 02/43] Add LoggingConfiguration struct for providing a log
 path to pgbackrest processes.

Add the ability to send pgbackrest logs to an additional volume for the
instance sidecar, repo host, and backup jobs. Add validation to prevent
incorrect log paths or setting the pgbackrest log-path via global. Add
appropriate unit and integration tests.
---
 ...ator.crunchydata.com_postgresclusters.yaml |  75 ++++-
 internal/collector/pgbackrest.go              |  10 +-
 internal/collector/pgbackrest_test.go         |  18 +-
 internal/collector/postgres.go                |   8 +-
 .../controller/postgrescluster/instance.go    |   4 +-
 .../controller/postgrescluster/pgbackrest.go  |  57 ++--
 .../postgrescluster/pgbackrest_test.go        | 132 +++++++-
 internal/pgbackrest/config.go                 |  70 ++--
 internal/pgbackrest/config_test.go            | 184 +++++++++++
 internal/postgres/config.go                   |   4 +-
 .../testing/validation/pgbackrest_test.go     | 310 ++++++++++++++++++
 internal/util/pgbackrest.go                   |  26 ++
 internal/util/pgbackrest_test.go              |  42 +++
 .../v1/pgbackrest_types.go                    |  18 +
 .../v1/postgrescluster_types.go               |   9 +-
 .../v1/zz_generated.deepcopy.go               |  16 +
 .../v1beta1/pgbackrest_types.go               |  14 +-
 .../v1beta1/postgrescluster_types.go          |   3 +
 .../v1beta1/shared_types.go                   |   7 +
 .../v1beta1/zz_generated.deepcopy.go          |  30 ++
 20 files changed, 941 insertions(+), 96 deletions(-)
 create mode 100644 internal/testing/validation/pgbackrest_test.go
 create mode 100644 internal/util/pgbackrest.go
 create mode 100644 internal/util/pgbackrest_test.go
 create mode 100644 pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go

diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml
index c863abe73..5a250c935 100644
--- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml
+++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml
@@ -151,7 +151,7 @@ spec:
               properties:
                 configuration:
                   description: |-
-                    Projected volumes containing custom pgBackRest configuration. These files are mounted 
+                    Projected volumes containing custom pgBackRest configuration. These files are mounted
                     under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the
                     PostgreSQL Operator:
                     https://pgbackrest.org/configuration.html
@@ -1424,6 +1424,14 @@ spec:
                           x-kubernetes-list-type: atomic
                       type: object
                   type: object
+                log:
+                  description: Logging configuration for pgbackrest processes
+                    running in Backup Job Pods.
+                  properties:
+                    path:
+                      maxLength: 256
+                      type: string
+                  type: object
                 priorityClassName:
                   description: |-
                     Priority class name for the pgBackRest backup Job pods.
@@ -1583,6 +1591,14 @@ spec: x-kubernetes-list-type: map type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in postgres instance pods. + properties: + path: + maxLength: 256 + type: string + type: object manual: description: Defines details for manual pgBackRest backup Jobs @@ -2551,6 +2567,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in the repo host pod. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest repo host pod. Changing this value @@ -4562,6 +4586,21 @@ spec: required: - repos type: object + x-kubernetes-validations: + - message: pgbackrest sidecar log path is restricted to an existing + additional volume + rule: '!self.?log.path.hasValue() || self.log.path.startsWith("/volumes/")' + - message: repo host log path is restricted to an existing additional + volume + rule: '!self.?repoHost.log.path.hasValue() || self.repoHost.volumes.additional.exists(x, + self.repoHost.log.path.startsWith("/volumes/"+x.name))' + - message: backup jobs log path is restricted to an existing additional + volume + rule: '!self.?jobs.log.path.hasValue() || self.jobs.volumes.additional.exists(x, + self.jobs.log.path.startsWith("/volumes/"+x.name))' + - message: pgbackrest log-path must be set via the various log.path + fields in the spec + rule: '!self.?global["log-path"].hasValue()' snapshots: description: VolumeSnapshot configuration properties: @@ -11209,6 +11248,7 @@ spec: type: object type: array volumes: + description: Volumes to be added to the instance set. properties: additional: description: Additional pre-existing volumes to add to the @@ -18416,6 +18456,12 @@ spec: || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true) + - fieldPath: .backups.pgbackrest.log.path + message: all instances need an additional volume for pgbackrest sidecar + to log in "/volumes" + rule: self.?backups.pgbackrest.log.path.optMap(v, !v.startsWith("/volumes") + || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, + v.startsWith("/volumes/" + volume.name)))).orValue(true) status: description: PostgresClusterStatus defines the observed state of PostgresCluster properties: @@ -18951,7 +18997,7 @@ spec: properties: configuration: description: |- - Projected volumes containing custom pgBackRest configuration. These files are mounted + Projected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.html @@ -20224,6 +20270,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in Backup Job Pods. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest backup Job pods. @@ -20383,6 +20437,14 @@ spec: x-kubernetes-list-type: map type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in postgres instance pods. 
+ properties: + path: + maxLength: 256 + type: string + type: object manual: description: Defines details for manual pgBackRest backup Jobs @@ -21351,6 +21413,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in the repo host pod. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest repo host pod. Changing this value @@ -30002,6 +30072,7 @@ spec: type: object type: array volumes: + description: Volumes to be added to the instance set. properties: additional: description: Additional pre-existing volumes to add to the diff --git a/internal/collector/pgbackrest.go b/internal/collector/pgbackrest.go index 75cc9a55c..1d51a2218 100644 --- a/internal/collector/pgbackrest.go +++ b/internal/collector/pgbackrest.go @@ -8,7 +8,6 @@ import ( "context" _ "embed" "encoding/json" - "fmt" "slices" "github.com/crunchydata/postgres-operator/internal/naming" @@ -25,19 +24,12 @@ func NewConfigForPgBackrestRepoHostPod( ctx context.Context, spec *v1beta1.InstrumentationSpec, repos []v1beta1.PGBackRestRepo, + directory string, ) *Config { config := NewConfig(spec) if OpenTelemetryLogsEnabled(ctx, spec) { - var directory string - for _, repo := range repos { - if repo.Volume != nil { - directory = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) - break - } - } - // We should only enter this function if a PVC is assigned for a dedicated repohost // but if we don't have one, exit early. if directory == "" { diff --git a/internal/collector/pgbackrest_test.go b/internal/collector/pgbackrest_test.go index 2b26d4053..653b8b780 100644 --- a/internal/collector/pgbackrest_test.go +++ b/internal/collector/pgbackrest_test.go @@ -30,8 +30,7 @@ func TestNewConfigForPgBackrestRepoHostPod(t *testing.T) { } var instrumentation *v1beta1.InstrumentationSpec require.UnmarshalInto(t, &instrumentation, `{}`) - - config := NewConfigForPgBackrestRepoHostPod(ctx, instrumentation, repos) + config := NewConfigForPgBackrestRepoHostPod(ctx, instrumentation, repos, "/test/directory") result, err := config.ToYAML() assert.NilError(t, err) @@ -43,7 +42,7 @@ exporters: extensions: file_storage/pgbackrest_logs: create_directory: false - directory: /pgbackrest/repo1/log/receiver + directory: /test/directory/receiver fsync: true processors: batch/1s: @@ -101,8 +100,8 @@ processors: receivers: filelog/pgbackrest_log: include: - - /pgbackrest/repo1/log/*.log - - /pgbackrest/repo1/log/*.log.1 + - /test/directory/*.log + - /test/directory/*.log.1 multiline: line_start_pattern: ^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}|^-{19} storage: file_storage/pgbackrest_logs @@ -136,8 +135,7 @@ service: Volume: new(v1beta1.RepoPVC), }, } - - config := NewConfigForPgBackrestRepoHostPod(ctx, testInstrumentationSpec(), repos) + config := NewConfigForPgBackrestRepoHostPod(ctx, testInstrumentationSpec(), repos, "/another/directory") result, err := config.ToYAML() assert.NilError(t, err) @@ -153,7 +151,7 @@ exporters: extensions: file_storage/pgbackrest_logs: create_directory: false - directory: /pgbackrest/repo1/log/receiver + directory: /another/directory/receiver fsync: true processors: batch/1s: @@ -211,8 +209,8 @@ processors: receivers: filelog/pgbackrest_log: include: - - /pgbackrest/repo1/log/*.log - - /pgbackrest/repo1/log/*.log.1 + - /another/directory/*.log + - /another/directory/*.log.1 multiline: line_start_pattern: ^\d{4}-\d{2}-\d{2} 
\d{2}:\d{2}:\d{2}\.\d{3}|^-{19} storage: file_storage/pgbackrest_logs diff --git a/internal/collector/postgres.go b/internal/collector/postgres.go index b73ae91a2..ca627a8fd 100644 --- a/internal/collector/postgres.go +++ b/internal/collector/postgres.go @@ -16,6 +16,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -244,8 +245,9 @@ func EnablePostgresLogging( } // pgBackRest pipeline + pgBackRestLogPath := util.GetPGBackRestLogPathForInstance(inCluster) outConfig.Extensions["file_storage/pgbackrest_logs"] = map[string]any{ - "directory": naming.PGBackRestPGDataLogPath + "/receiver", + "directory": pgBackRestLogPath + "/receiver", "create_directory": false, "fsync": true, } @@ -258,8 +260,8 @@ func EnablePostgresLogging( // a log record or two to the old file while rotation is occurring. // The collector knows not to create duplicate logs. "include": []string{ - naming.PGBackRestPGDataLogPath + "/*.log", - naming.PGBackRestPGDataLogPath + "/*.log.1", + pgBackRestLogPath + "/*.log", + pgBackRestLogPath + "/*.log.1", }, "storage": "file_storage/pgbackrest_logs", diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index a700aa1f9..b000074e5 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -1205,7 +1205,7 @@ func (r *Reconciler) reconcileInstance( // TODO(sidecar): Create these directories sometime other than startup. collector.AddToPod(ctx, cluster.Spec.Instrumentation, cluster.Spec.ImagePullPolicy, instanceConfigMap, &instance.Spec.Template, []corev1.VolumeMount{postgres.DataVolumeMount()}, pgPassword, - []string{naming.PGBackRestPGDataLogPath}, includeLogrotate, true) + []string{util.GetPGBackRestLogPathForInstance(cluster)}, includeLogrotate, true) } // Add postgres-exporter to the instance Pod spec @@ -1433,7 +1433,7 @@ func (r *Reconciler) reconcileInstanceConfigMap( collector.AddLogrotateConfigs(ctx, cluster.Spec.Instrumentation, instanceConfigMap, []collector.LogrotateConfig{{ - LogFiles: []string{naming.PGBackRestPGDataLogPath + "/*.log"}, + LogFiles: []string{util.GetPGBackRestLogPathForInstance(cluster) + "/*.log"}, }}) } } diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 0acb86513..d31350fd4 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "io" + "path/filepath" "reflect" "regexp" "sort" @@ -38,6 +39,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/shell" "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -821,7 +823,13 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl {Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()}, } } 
else { - container.Command = []string{"/bin/pgbackrest", "backup"} + mkdirCommand := "" + cloudLogPath := getCloudLogPath(postgresCluster) + if cloudLogPath != "" { + mkdirCommand += shell.MakeDirectories(cloudLogPath, cloudLogPath) + "; " + } + + container.Command = []string{"sh", "-c", "--", mkdirCommand + `exec "$@"`, "--", "/bin/pgbackrest", "backup"} container.Command = append(container.Command, cmdOpts...) } @@ -885,8 +893,8 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl pgbackrest.AddConfigToCloudBackupJob(postgresCluster, &jobSpec.Template) // Mount the PVC named in the "pgbackrest-cloud-log-volume" annotation, if any. - if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" { - util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolumeName) + if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" { + util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolume) } } @@ -2075,28 +2083,7 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, repoHostName, configHash, serviceName, serviceNamespace string, instanceNames []string) error { - // If the user has specified a PVC to use as a log volume for cloud backups via the - // PGBackRestCloudLogVolume annotation, check for the PVC. If we find it, set the cloud - // log path. If the user has specified a PVC, but we can't find it, create a warning event. - cloudLogPath := "" - if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" { - logVolume := &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: logVolumeName, - Namespace: postgresCluster.GetNamespace(), - }, - } - err := errors.WithStack(r.Client.Get(ctx, - client.ObjectKeyFromObject(logVolume), logVolume)) - if err != nil { - // PVC not retrieved, create warning event - r.Recorder.Event(postgresCluster, corev1.EventTypeWarning, - "PGBackRestCloudLogVolumeNotFound", err.Error()) - } else { - // We successfully found the specified PVC, so we will set the log path - cloudLogPath = "/volumes/" + logVolumeName - } - } + cloudLogPath := getCloudLogPath(postgresCluster) backrestConfig, err := pgbackrest.CreatePGBackRestConfigMapIntent(ctx, postgresCluster, repoHostName, configHash, serviceName, serviceNamespace, cloudLogPath, instanceNames) @@ -3351,3 +3338,23 @@ func authorizeBackupRemovalAnnotationPresent(postgresCluster *v1beta1.PostgresCl } return false } + +// getCloudLogPath is responsible for determining the appropriate log path for pgbackrest +// in cloud backup jobs. If the user has specified a PVC to use as a log volume for cloud +// backups via the PGBackRestCloudLogVolume annotation, set the cloud log path accordingly. +// If the user has not set the PGBackRestCloudLogVolume annotation, but has set a log path +// via the spec, use that. +// TODO: Make sure this is what we want (i.e. annotation to take precedence over spec) +// +// This function assumes that the backups/pgbackrest spec is present in postgresCluster. 
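+// For example (illustrative values): with the annotation naming PVC "logs-pvc" and
+// jobs.log.path set to "/volumes/other/log", the result is "/volumes/logs-pvc".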
+func getCloudLogPath(postgresCluster *v1beta1.PostgresCluster) string { + cloudLogPath := "" + if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" { + cloudLogPath = "/volumes/" + logVolume + } else if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil && + postgresCluster.Spec.Backups.PGBackRest.Jobs.Log != nil && + postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path != "" { + cloudLogPath = filepath.Clean(postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path) + } + return cloudLogPath +} diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index d87223a2e..b4e590411 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -2663,6 +2663,11 @@ func TestGenerateBackupJobIntent(t *testing.T) { assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: + - sh + - -c + - -- + - exec "$@" + - -- - /bin/pgbackrest - backup - --stanza=db @@ -2965,6 +2970,12 @@ volumes: assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: + - sh + - -c + - -- + - mkdir -p '/volumes/another-pvc' && { chmod 0775 '/volumes/another-pvc' || :; }; + exec "$@" + - -- - /bin/pgbackrest - backup - --stanza=db @@ -3031,7 +3042,11 @@ volumes: cluster := cluster.DeepCopy() cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/stuff/log", + }, Volumes: &v1beta1.PGBackRestVolumesSpec{ Additional: []v1beta1.AdditionalVolume{ { @@ -3048,15 +3063,66 @@ volumes: nil, nil, ) - for _, container := range spec.Template.Spec.Containers { - assert.Assert(t, cmp.MarshalContains(container.VolumeMounts, - ` -- mountPath: /volumes/stuff - name: volumes-stuff`)) - } - - assert.Assert(t, cmp.MarshalContains(spec.Template.Spec.Volumes, - ` + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - sh + - -c + - -- + - mkdir -p '/volumes/stuff/log' && { chmod 0775 '/volumes/stuff/log' || :; }; exec + "$@" + - -- + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/stuff + name: volumes-stuff +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp - name: volumes-stuff persistentVolumeClaim: claimName: additional-pvc`)) @@ -4497,3 +4563,51 @@ func TestGetRepoHostVolumeRequests(t *testing.T) { }) } } + +func TestGetCloudLogPath(t *testing.T) { + t.Run("NoAnnotationNoSpecPath", func(t *testing.T) { + 
postgrescluster := &v1beta1.PostgresCluster{}
+		assert.Equal(t, getCloudLogPath(postgrescluster), "")
+	})
+
+	t.Run("AnnotationSetNoSpecPath", func(t *testing.T) {
+		postgrescluster := &v1beta1.PostgresCluster{}
+		postgrescluster.Annotations = map[string]string{}
+		postgrescluster.Annotations[naming.PGBackRestCloudLogVolume] = "another-pvc"
+		assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/another-pvc")
+	})
+
+	t.Run("NoAnnotationSpecPathSet", func(t *testing.T) {
+		postgrescluster := &v1beta1.PostgresCluster{
+			Spec: v1beta1.PostgresClusterSpec{
+				Backups: v1beta1.Backups{
+					PGBackRest: v1beta1.PGBackRestArchive{
+						Jobs: &v1beta1.BackupJobs{
+							Log: &v1beta1.LoggingConfiguration{
+								Path: "/volumes/test/log/",
+							},
+						},
+					},
+				},
+			},
+		}
+		assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/test/log")
+	})
+
+	t.Run("BothAnnotationAndSpecPathSet", func(t *testing.T) {
+		postgrescluster := &v1beta1.PostgresCluster{
+			Spec: v1beta1.PostgresClusterSpec{
+				Backups: v1beta1.Backups{
+					PGBackRest: v1beta1.PGBackRestArchive{
+						Jobs: &v1beta1.BackupJobs{
+							Log: &v1beta1.LoggingConfiguration{
+								Path: "/volumes/test/log",
+							},
+						},
+					},
+				},
+			},
+		}
+		postgrescluster.Annotations = map[string]string{}
+		postgrescluster.Annotations[naming.PGBackRestCloudLogVolume] = "another-pvc"
+		assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/another-pvc")
+	})
+}
diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go
index 090f119d1..f1d1fc30f 100644
--- a/internal/pgbackrest/config.go
+++ b/internal/pgbackrest/config.go
@@ -71,8 +71,8 @@ const (
 
 // CreatePGBackRestConfigMapIntent creates a configmap struct with pgBackRest pgbackrest.conf settings in the data field.
 // The keys within the data field correspond to the use of that configuration.
-// pgbackrest_job.conf is used by certain jobs, such as stanza create and backup
-// pgbackrest_primary.conf is used by the primary database pod
+// pgbackrest-server.conf is used by the pgBackRest TLS server
+// pgbackrest_instance.conf is used by the primary database pod
 // pgbackrest_repo.conf is used by the pgBackRest repository pod
 // pgbackrest_cloud.conf is used by cloud repo backup jobs
 func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster,
@@ -112,6 +112,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet
 		strconv.Itoa(postgresCluster.Spec.PostgresVersion),
 		pgPort, postgresCluster.Spec.Backups.PGBackRest.Repos,
 		postgresCluster.Spec.Backups.PGBackRest.Global,
+		util.GetPGBackRestLogPathForInstance(postgresCluster),
 	).String()
 
 	// As the cluster transitions from having a repository host to having none,
@@ -122,6 +123,9 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet
 		serverConfig(postgresCluster).String()
 
 	if RepoHostVolumeDefined(postgresCluster) && repoHostName != "" {
+		// Get pgbackrest log path for repo host pod
+		pgBackRestLogPath := generateRepoHostLogPath(postgresCluster)
+
 		cm.Data[CMRepoKey] = iniGeneratedWarning +
 			populateRepoHostConfigurationMap(
 				serviceName, serviceNamespace,
@@ -130,26 +134,20 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet
 				pgPort, instanceNames,
 				postgresCluster.Spec.Backups.PGBackRest.Repos,
 				postgresCluster.Spec.Backups.PGBackRest.Global,
+				pgBackRestLogPath,
 			).String()
 
 		if collector.OpenTelemetryLogsOrMetricsEnabled(ctx, postgresCluster) {
-
 			err = collector.AddToConfigMap(ctx, collector.NewConfigForPgBackrestRepoHostPod(
 				ctx,
 				postgresCluster.Spec.Instrumentation,
postgresCluster.Spec.Backups.PGBackRest.Repos, + pgBackRestLogPath, ), cm) // If OTel logging is enabled, add logrotate config for the RepoHost if err == nil && collector.OpenTelemetryLogsEnabled(ctx, postgresCluster) { - var pgBackRestLogPath string - for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { - if repo.Volume != nil { - pgBackRestLogPath = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) - break - } - } collector.AddLogrotateConfigs(ctx, postgresCluster.Spec.Instrumentation, cm, []collector.LogrotateConfig{{ LogFiles: []string{pgBackRestLogPath + "/*.log"}, @@ -180,13 +178,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, cluster *v1beta1.PostgresCluster) string { - var pgBackRestLogPath string - for _, repo := range cluster.Spec.Backups.PGBackRest.Repos { - if repo.Volume != nil { - pgBackRestLogPath = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) - break - } - } + pgBackRestLogPath := generateRepoHostLogPath(cluster) container := corev1.Container{ // TODO(log-rotation): The second argument here should be the path @@ -380,7 +372,7 @@ func populatePGInstanceConfigurationMap( serviceName, serviceNamespace, repoHostName, pgdataDir, fetchKeyCommand, postgresVersion string, pgPort int32, repos []v1beta1.PGBackRestRepo, - globalConfig map[string]string, + globalConfig map[string]string, pgBackRestLogPath string, ) iniSectionSet { // TODO(cbandy): pass a FQDN in already. @@ -396,7 +388,7 @@ func populatePGInstanceConfigurationMap( // pgBackRest spool-path should always be co-located with the Postgres WAL path. global.Set("spool-path", "/pgdata/pgbackrest-spool") // pgBackRest will log to the pgData volume for commands run on the PostgreSQL instance - global.Set("log-path", naming.PGBackRestPGDataLogPath) + global.Set("log-path", pgBackRestLogPath) for _, repo := range repos { global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) @@ -450,13 +442,12 @@ func populateRepoHostConfigurationMap( serviceName, serviceNamespace, pgdataDir, fetchKeyCommand, postgresVersion string, pgPort int32, pgHosts []string, repos []v1beta1.PGBackRestRepo, - globalConfig map[string]string, + globalConfig map[string]string, logPath string, ) iniSectionSet { global := iniMultiSet{} stanza := iniMultiSet{} - var pgBackRestLogPathSet bool for _, repo := range repos { global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) @@ -468,20 +459,14 @@ func populateRepoHostConfigurationMap( global.Set(option, val) } } - - if !pgBackRestLogPathSet && repo.Volume != nil { - // pgBackRest will log to the first configured repo volume when commands - // are run on the pgBackRest repo host. With our previous check in - // RepoHostVolumeDefined(), we've already validated that at least one - // defined repo has a volume. - global.Set("log-path", fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name)) - pgBackRestLogPathSet = true - } } - // If no log path was set, don't log because the default path is not writable. - if !pgBackRestLogPathSet { + // If no log path was provided, don't log because the default path is not writable. + // Otherwise, set the log-path. 
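+	// (pgBackRest's compiled-in default log-path is /var/log/pgbackrest, which is not writable here.)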
+ if logPath == "" { global.Set("log-level-file", "off") + } else { + global.Set("log-path", logPath) } for option, val := range globalConfig { @@ -818,3 +803,24 @@ func serverConfig(cluster *v1beta1.PostgresCluster) iniSectionSet { "global:server": server, } } + +// generateRepoHostLogPath takes a postgrescluster and returns the log path that +// should be used by pgbackrest in the Repo Host Pod based on the repos specified +// and whether the user has specified a log path. +// +// This function assumes that the backups/pgbackrest spec is present in cluster. +func generateRepoHostLogPath(cluster *v1beta1.PostgresCluster) string { + for _, repo := range cluster.Spec.Backups.PGBackRest.Repos { + if repo.Volume != nil { + // If the user has set a log path in the spec, use it. + // Otherwise, default to /pgbackrest/repo#/log + if cluster.Spec.Backups.PGBackRest.RepoHost != nil && + cluster.Spec.Backups.PGBackRest.RepoHost.Log != nil && + cluster.Spec.Backups.PGBackRest.RepoHost.Log.Path != "" { + return cluster.Spec.Backups.PGBackRest.RepoHost.Log.Path + } + return fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) + } + } + return "" +} diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index 4617b3a80..91ce833c0 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -451,6 +451,104 @@ pg1-socket-path = /tmp/postgres `, "\t\n")+"\n") }) + t.Run("LoggingToAdditionalVolume", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.UID = "guitar" + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + cluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test", + }, + Volumes: &v1beta1.PGBackRestVolumesSpec{ + Additional: []v1beta1.AdditionalVolume{ + { + ClaimName: "some-pvc", + Name: "test", + }, + }, + }, + } + + configmap, err := CreatePGBackRestConfigMapIntent(context.Background(), cluster, + "repo-hostname", "anumber", "pod-service-name", "test-ns", "", + []string{"some-instance"}) + + assert.NilError(t, err) + assert.DeepEqual(t, configmap.Annotations, map[string]string{}) + assert.DeepEqual(t, configmap.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", + }) + + assert.Equal(t, configmap.Data["config-hash"], "anumber") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@guitar=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +repo1-host = repo-hostname-0.pod-service-name.test-ns.svc.`+domain+` +repo1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +repo1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +repo1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +repo1-host-type = tls +repo1-host-user = postgres +repo1-path = /pgbackrest/repo1 +spool-path = /pgdata/pgbackrest-spool + +[db] +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +log-path = /volumes/test +repo1-path = /pgbackrest/repo1 + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], "") + }) + t.Run("CustomMetadata", func(t *testing.T) { cluster := cluster.DeepCopy() cluster.Spec.Metadata = &v1beta1.Metadata{ @@ -799,3 +897,89 @@ log-level-stderr = error log-timestamp = n `) } + +func TestGenerateRepoHostLogPath(t *testing.T) { + cluster := v1beta1.PostgresCluster{} + cluster.Namespace = "ns1" + cluster.Name = "hippo-dance" + + cluster.Spec.Port = initialize.Int32(2345) + cluster.Spec.PostgresVersion = 12 + + cluster.Spec.Backups = v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{}, + } + + t.Run("NoReposNoRepoHost", func(t *testing.T) { + cluster := cluster.DeepCopy() + assert.Equal(t, generateRepoHostLogPath(cluster), "") + }) + + t.Run("NoVolumeRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: &v1beta1.RepoGCS{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "") + }) + + t.Run("OneVolumeRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/pgbackrest/repo1/log") + }) + + t.Run("TwoVolumeRepos", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + { + Name: "repo2", + Volume: &v1beta1.RepoPVC{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/pgbackrest/repo1/log") + }) + + t.Run("VolumeRepoNotFirst", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: &v1beta1.RepoGCS{}, + }, + { + Name: "repo2", + Volume: &v1beta1.RepoPVC{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/pgbackrest/repo2/log") + }) + + t.Run("LogPathSpecified", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + cluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{ + Log: &v1beta1.LoggingConfiguration{ + Path: 
"/some/directory", + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/some/directory") + }) +} diff --git a/internal/postgres/config.go b/internal/postgres/config.go index 1e9f52a7e..0300d4d34 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -423,8 +423,8 @@ func startupCommand( `(`+shell.MakeDirectories(dataMountPath, naming.PatroniPGDataLogPath)+`) ||`, `halt "$(permissions `+shell.QuoteWord(naming.PatroniPGDataLogPath)+` ||:)"`, - `(`+shell.MakeDirectories(dataMountPath, naming.PGBackRestPGDataLogPath)+`) ||`, - `halt "$(permissions `+shell.QuoteWord(naming.PGBackRestPGDataLogPath)+` ||:)"`, + `(`+shell.MakeDirectories(dataMountPath, util.GetPGBackRestLogPathForInstance(cluster))+`) ||`, + `halt "$(permissions `+shell.QuoteWord(util.GetPGBackRestLogPathForInstance(cluster))+` ||:)"`, ) pg_rewind_override := "" diff --git a/internal/testing/validation/pgbackrest_test.go b/internal/testing/validation/pgbackrest_test.go new file mode 100644 index 000000000..622696705 --- /dev/null +++ b/internal/testing/validation/pgbackrest_test.go @@ -0,0 +1,310 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package validation + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" + v1 "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1" +) + +func TestV1PGBackRestLogging(t *testing.T) { + ctx := context.Background() + cc := require.Kubernetes(t) + t.Parallel() + + namespace := require.Namespace(t, cc) + + base := v1.NewPostgresCluster() + base.Namespace = namespace.Name + base.Name = "pgbackrest-logging" + // required fields + require.UnmarshalInto(t, &base.Spec, `{ + postgresVersion: 16, + instances: [{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }], + backups: { + pgbackrest: { + repos: [{ + name: repo1, + }] + }, + }, + }`) + + assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), + "expected this base to be valid") + + t.Run("Cannot set log-path via global", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + global: { + log-path: "/anything" + } + }`) + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "pgbackrest log-path must be set via the various log.path fields in the spec") + }) + + t.Run("Cannot set pgbackrest sidecar's log.path without correct subdir", func(t *testing.T) { + tmp := base.DeepCopy() + + t.Run("Wrong subdir", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/something/wrong" + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "pgbackrest sidecar log path is restricted to an existing additional volume") + }) + + t.Run("Single instance - missing additional volume", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to 
log in "/volumes"`) + }) + + t.Run("Multiple instances - one missing additional volume", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + },{ + name: "instance2", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }]`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`) + }) + + t.Run("Single instance - additional volume present", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + }]`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), "expected log.path to be valid") + }) + + t.Run("Multiple instances - additional volume present but not matching path", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + },{ + name: "instance2", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "another", + claimName: "another-pvc-claim" + }] + } + }]`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`) + }) + + t.Run("Multiple instances - additional volumes present and matching log path", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + },{ + name: "instance2", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "another-pvc-claim" + }] + } + }]`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), "expected log.path to be valid") + }) + }) + + t.Run("Cannot set logging on volumes that don't exist", func(t *testing.T) { + t.Run("Repo Host", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + repoHost: { + log: { + path: "/volumes/wrong" + }, + volumes: { + additional: [ + { + name: logging, + claimName: required-1 + }] + } + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + 
assert.ErrorContains(t, err, "repo host log path is restricted to an existing additional volume")
+		})
+
+		t.Run("Backup Jobs", func(t *testing.T) {
+			tmp := base.DeepCopy()
+
+			require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{
+				jobs: {
+					log: {
+						path: "/volumes/wrong"
+					},
+					volumes: {
+						additional: [
+						{
+							name: logging,
+							claimName: required-1
+						}]
+					}
+				}
+			}`)
+
+			err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll)
+			assert.Assert(t, apierrors.IsInvalid(err))
+			assert.ErrorContains(t, err, "backup jobs log path is restricted to an existing additional volume")
+		})
+	})
+
+	t.Run("Can set logging on volumes that do exist", func(t *testing.T) {
+		t.Run("Repo Host", func(t *testing.T) {
+			tmp := base.DeepCopy()
+
+			require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{
+				repoHost: {
+					log: {
+						path: "/volumes/logging/logs"
+					},
+					volumes: {
+						additional: [
+						{
+							name: logging,
+							claimName: required-1
+						}]
+					}
+				}
+			}`)
+
+			assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll),
+				"expected this configuration to be valid")
+		})
+
+		t.Run("Backup Jobs", func(t *testing.T) {
+			tmp := base.DeepCopy()
+
+			require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{
+				jobs: {
+					log: {
+						path: "/volumes/logging/logs"
+					},
+					volumes: {
+						additional: [
+						{
+							name: logging,
+							claimName: required-1
+						}]
+					}
+				}
+			}`)
+
+			assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll),
+				"expected this configuration to be valid")
+		})
+	})
+}
diff --git a/internal/util/pgbackrest.go b/internal/util/pgbackrest.go
new file mode 100644
index 000000000..8452c16b9
--- /dev/null
+++ b/internal/util/pgbackrest.go
@@ -0,0 +1,26 @@
+// Copyright 2017 - 2025 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package util
+
+import (
+	"path/filepath"
+
+	"github.com/crunchydata/postgres-operator/internal/naming"
+	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
+)
+
+// GetPGBackRestLogPathForInstance determines the appropriate log path for pgbackrest
+// in instance pods. If the user has set a log path via the spec, use that. Otherwise, use
+// the default log path set in the naming package. Ensure trailing slashes are trimmed.
+//
+// This function assumes that the backups/pgbackrest spec is present in postgresCluster.
+func GetPGBackRestLogPathForInstance(postgresCluster *v1beta1.PostgresCluster) string {
+	logPath := naming.PGBackRestPGDataLogPath
+	if postgresCluster.Spec.Backups.PGBackRest.Log != nil &&
+		postgresCluster.Spec.Backups.PGBackRest.Log.Path != "" {
+		logPath = postgresCluster.Spec.Backups.PGBackRest.Log.Path
+	}
+	return filepath.Clean(logPath)
+}
diff --git a/internal/util/pgbackrest_test.go b/internal/util/pgbackrest_test.go
new file mode 100644
index 000000000..e654436af
--- /dev/null
+++ b/internal/util/pgbackrest_test.go
@@ -0,0 +1,42 @@
+// Copyright 2021 - 2025 Crunchy Data Solutions, Inc.
+// +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "testing" + + "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGetPGBackRestLogPathForInstance(t *testing.T) { + t.Run("NoSpecPath", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{}, + }, + }, + } + assert.Equal(t, GetPGBackRestLogPathForInstance(postgrescluster), naming.PGBackRestPGDataLogPath) + }) + + t.Run("SpecPathSet", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test/log", + }, + }, + }, + }, + } + assert.Equal(t, GetPGBackRestLogPathForInstance(postgrescluster), "/volumes/test/log") + }) +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go new file mode 100644 index 000000000..77076a5de --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go @@ -0,0 +1,18 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +import ( + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// PGBackRestArchive defines a pgBackRest archive configuration +// +kubebuilder:validation:XValidation:rule=`!self.?log.path.hasValue() || self.log.path.startsWith("/volumes/")`,message=`pgbackrest sidecar log path is restricted to an existing additional volume` +// +kubebuilder:validation:XValidation:rule=`!self.?repoHost.log.path.hasValue() || self.repoHost.volumes.additional.exists(x, self.repoHost.log.path.startsWith("/volumes/"+x.name))`,message=`repo host log path is restricted to an existing additional volume` +// +kubebuilder:validation:XValidation:rule=`!self.?jobs.log.path.hasValue() || self.jobs.volumes.additional.exists(x, self.jobs.log.path.startsWith("/volumes/"+x.name))`,message=`backup jobs log path is restricted to an existing additional volume` +// +kubebuilder:validation:XValidation:rule=`!self.?global["log-path"].hasValue()`,message=`pgbackrest log-path must be set via the various log.path fields in the spec` +type PGBackRestArchive struct { + v1beta1.PGBackRestArchive `json:",inline"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go index 1a642e13c..d2f38441d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go @@ -22,6 +22,10 @@ import ( // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need "volumes.temp" to log in "/pgtmp"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/pgtmp/logs/postgres") || self.instances.all(i, i.?volumes.temp.hasValue())).orValue(true)` // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need "walVolumeClaimSpec" to log in "/pgwal"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != 
string || !v.startsWith("/pgwal/logs/postgres") || self.instances.all(i, i.?walVolumeClaimSpec.hasValue())).orValue(true)` // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need an additional volume to log in "/volumes"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true)` +// +// # pgBackRest Logging +// +// +kubebuilder:validation:XValidation:fieldPath=`.backups.pgbackrest.log.path`,message=`all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`,rule=`self.?backups.pgbackrest.log.path.optMap(v, !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true)` type PostgresClusterSpec struct { // +optional Metadata *v1beta1.Metadata `json:"metadata,omitempty"` @@ -68,6 +72,7 @@ type PostgresClusterSpec struct { // namespace as the cluster. // +optional DatabaseInitSQL *DatabaseInitSQL `json:"databaseInitSQL,omitempty"` + // Whether or not the PostgreSQL cluster should use the defined default // scheduling constraints. If the field is unset or false, the default // scheduling constraints will be used in addition to any custom constraints @@ -356,7 +361,7 @@ type Backups struct { // pgBackRest archive configuration // +optional - PGBackRest v1beta1.PGBackRestArchive `json:"pgbackrest"` + PGBackRest PGBackRestArchive `json:"pgbackrest"` // VolumeSnapshot configuration // +optional @@ -538,6 +543,8 @@ type PostgresInstanceSetSpec struct { // +optional TablespaceVolumes []TablespaceVolume `json:"tablespaceVolumes,omitempty"` + // Volumes to be added to the instance set. + // +optional Volumes *v1beta1.PostgresVolumesSpec `json:"volumes,omitempty"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go index 80043ab76..46d181707 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go @@ -182,6 +182,22 @@ func (in *MonitoringStatus) DeepCopy() *MonitoringStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { + *out = *in + in.PGBackRestArchive.DeepCopyInto(&out.PGBackRestArchive) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGBackRestArchive. +func (in *PGBackRestArchive) DeepCopy() *PGBackRestArchive { + if in == nil { + return nil + } + out := new(PGBackRestArchive) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PGBouncerPodSpec) DeepCopyInto(out *PGBouncerPodSpec) { *out = *in diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go index d9777bdcd..0f87676a7 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go @@ -89,7 +89,7 @@ type PGBackRestArchive struct { // +optional Metadata *Metadata `json:"metadata,omitempty"` - // Projected volumes containing custom pgBackRest configuration. These files are mounted + // Projected volumes containing custom pgBackRest configuration. These files are mounted // under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the // PostgreSQL Operator: // https://pgbackrest.org/configuration.html @@ -113,6 +113,10 @@ type PGBackRestArchive struct { // +optional Jobs *BackupJobs `json:"jobs,omitempty"` + // Logging configuration for pgbackrest processes running in postgres instance pods. + // +optional + Log *LoggingConfiguration `json:"log,omitempty"` + // Defines a pgBackRest repository // +kubebuilder:validation:MinItems=1 // +listType=map @@ -155,6 +159,10 @@ type BackupJobs struct { // +optional Resources corev1.ResourceRequirements `json:"resources,omitzero"` + // Logging configuration for pgbackrest processes running in Backup Job Pods. + // +optional + Log *LoggingConfiguration `json:"log,omitempty"` + // Priority class name for the pgBackRest backup Job pods. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ // +optional @@ -215,6 +223,10 @@ type PGBackRestRepoHost struct { // +optional Affinity *corev1.Affinity `json:"affinity,omitempty"` + // Logging configuration for pgbackrest processes running in the repo host pod. + // +optional + Log *LoggingConfiguration `json:"log,omitempty"` + // Priority class name for the pgBackRest repo host pod. Changing this value // causes PostgreSQL to restart. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 4374fa5e4..525f772d9 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -59,6 +59,7 @@ type PostgresClusterSpec struct { // namespace as the cluster. // +optional DatabaseInitSQL *DatabaseInitSQL `json:"databaseInitSQL,omitempty"` + // Whether or not the PostgreSQL cluster should use the defined default // scheduling constraints. If the field is unset or false, the default // scheduling constraints will be used in addition to any custom constraints @@ -526,6 +527,8 @@ type PostgresInstanceSetSpec struct { // +optional TablespaceVolumes []TablespaceVolume `json:"tablespaceVolumes,omitempty"` + // Volumes to be added to the instance set. 
+ // +optional Volumes *PostgresVolumesSpec `json:"volumes,omitempty"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index 4f276a8d0..c6351ca86 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -383,3 +383,10 @@ func (in *AdditionalVolume) AsVolume(name string) corev1.Volume { return out } + +// LoggingConfiguration provides logging configuration for various components +type LoggingConfiguration struct { + // +kubebuilder:validation:MaxLength=256 + // +optional + Path string `json:"path,omitempty"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index 884386982..ac271ad54 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -67,6 +67,11 @@ func (in *AutoGrowSpec) DeepCopy() *AutoGrowSpec { func (in *BackupJobs) DeepCopyInto(out *BackupJobs) { *out = *in in.Resources.DeepCopyInto(&out.Resources) + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(LoggingConfiguration) + **out = **in + } if in.PriorityClassName != nil { in, out := &in.PriorityClassName, &out.PriorityClassName *out = new(string) @@ -658,6 +663,21 @@ func (in *InstrumentationSpec) DeepCopy() *InstrumentationSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfiguration) DeepCopyInto(out *LoggingConfiguration) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfiguration. +func (in *LoggingConfiguration) DeepCopy() *LoggingConfiguration { + if in == nil { + return nil + } + out := new(LoggingConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metadata) DeepCopyInto(out *Metadata) { *out = *in @@ -1156,6 +1176,11 @@ func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { *out = new(BackupJobs) (*in).DeepCopyInto(*out) } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(LoggingConfiguration) + **out = **in + } if in.Repos != nil { in, out := &in.Repos, &out.Repos *out = make([]PGBackRestRepo, len(*in)) @@ -1374,6 +1399,11 @@ func (in *PGBackRestRepoHost) DeepCopyInto(out *PGBackRestRepoHost) { *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(LoggingConfiguration) + **out = **in + } if in.PriorityClassName != nil { in, out := &in.PriorityClassName, &out.PriorityClassName *out = new(string) From 82dfd52e208bb2a9dcb46a95b307c6ac9517170d Mon Sep 17 00:00:00 2001 From: jmckulk Date: Tue, 16 Sep 2025 15:39:24 -0400 Subject: [PATCH 03/43] Update pgWAL autogrow events to warnings WAL volumes growing could indicate an issue with backups that needs to be addressed. This event should be presented to users as a warning. 
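For illustration only (not part of this patch), a minimal sketch of the event-type selection this change inlines at each call site; eventTypeForVolume is a hypothetical helper name:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // eventTypeForVolume mirrors the inlined check: pgWAL expansion is
    // surfaced as a Warning because unbounded WAL growth can mean archiving
    // or backups are stuck; every other volume type stays Normal.
    func eventTypeForVolume(volumeType string) string {
        if volumeType == "pgWAL" {
            return corev1.EventTypeWarning
        }
        return corev1.EventTypeNormal
    }

    func main() {
        fmt.Println(eventTypeForVolume("pgWAL"))  // Warning
        fmt.Println(eventTypeForVolume("pgData")) // Normal
    }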
--- .../controller/postgrescluster/autogrow.go | 13 +++++++++--- .../postgrescluster/autogrow_test.go | 20 +++++++++++++------ 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/internal/controller/postgrescluster/autogrow.go b/internal/controller/postgrescluster/autogrow.go index 6abe380c0..f06cd78eb 100644 --- a/internal/controller/postgrescluster/autogrow.go +++ b/internal/controller/postgrescluster/autogrow.go @@ -63,7 +63,11 @@ func (r *Reconciler) storeDesiredRequest( } if limitSet && current.Value() > previous.Value() { - r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeAutoGrow", + eventType := corev1.EventTypeNormal + if volumeType == "pgWAL" { + eventType = corev1.EventTypeWarning + } + r.Recorder.Eventf(cluster, eventType, "VolumeAutoGrow", "%s volume expansion to %v requested for %s/%s.", volumeType, current.String(), cluster.Name, host) } @@ -165,8 +169,11 @@ func (r *Reconciler) setVolumeSize(ctx context.Context, cluster *v1beta1.Postgre // If the user manually requests a lower limit that is smaller than the current // or requested volume size, it will be ignored in favor of the limit value. if volumeRequestSize.Value() >= volumeLimitFromSpec.Value() { - - r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeLimitReached", + eventType := corev1.EventTypeNormal + if volumeType == "pgWAL" { + eventType = corev1.EventTypeWarning + } + r.Recorder.Eventf(cluster, eventType, "VolumeLimitReached", "%s volume(s) for %s/%s are at size limit (%v).", volumeType, cluster.Name, host, volumeLimitFromSpec) diff --git a/internal/controller/postgrescluster/autogrow_test.go b/internal/controller/postgrescluster/autogrow_test.go index e276e60a1..180bd4908 100644 --- a/internal/controller/postgrescluster/autogrow_test.go +++ b/internal/controller/postgrescluster/autogrow_test.go @@ -101,6 +101,7 @@ func TestStoreDesiredRequest(t *testing.T) { expectedLog string expectedNumEvents int expectedEvent string + expectedEventType string }{{ tcName: "PGData-BadRequestNoBackup", Voltype: "pgData", host: "red", @@ -122,13 +123,13 @@ func TestStoreDesiredRequest(t *testing.T) { tcName: "PGData-BadBackupRequest", Voltype: "pgData", host: "red", desiredRequest: "2Gi", desiredRequestBackup: "bar", expectedValue: "2Gi", - expectedNumEvents: 1, expectedEvent: "pgData volume expansion to 2Gi requested for rhino/red.", + expectedNumEvents: 1, expectedEvent: "pgData volume expansion to 2Gi requested for rhino/red.", expectedEventType: corev1.EventTypeNormal, expectedNumLogs: 1, expectedLog: "Unable to parse pgData volume request from status backup (bar) for rhino/red", }, { tcName: "PGData-ValueUpdateWithEvent", Voltype: "pgData", host: "red", desiredRequest: "1Gi", desiredRequestBackup: "", expectedValue: "1Gi", - expectedNumEvents: 1, expectedEvent: "pgData volume expansion to 1Gi requested for rhino/red.", + expectedNumEvents: 1, expectedEvent: "pgData volume expansion to 1Gi requested for rhino/red.", expectedEventType: corev1.EventTypeNormal, expectedNumLogs: 0, }, { tcName: "PGWAL-BadRequestNoBackup", @@ -156,13 +157,13 @@ func TestStoreDesiredRequest(t *testing.T) { tcName: "PGWAL-BadBackupRequest", Voltype: "pgWAL", host: "red", desiredRequest: "2Gi", desiredRequestBackup: "bar", expectedValue: "2Gi", - expectedNumEvents: 1, expectedEvent: "pgWAL volume expansion to 2Gi requested for rhino/red.", + expectedNumEvents: 1, expectedEvent: "pgWAL volume expansion to 2Gi requested for rhino/red.", expectedEventType: corev1.EventTypeWarning, expectedNumLogs: 1, expectedLog: "Unable to 
parse pgWAL volume request from status backup (bar) for rhino/red", }, { tcName: "PGWAL-ValueUpdateWithEvent", Voltype: "pgWAL", host: "red", desiredRequest: "1Gi", desiredRequestBackup: "", expectedValue: "1Gi", - expectedNumEvents: 1, expectedEvent: "pgWAL volume expansion to 1Gi requested for rhino/red.", + expectedNumEvents: 1, expectedEvent: "pgWAL volume expansion to 1Gi requested for rhino/red.", expectedEventType: corev1.EventTypeWarning, expectedNumLogs: 0, }, { tcName: "Repo-BadRequestNoBackup", @@ -190,13 +191,13 @@ func TestStoreDesiredRequest(t *testing.T) { tcName: "Repo-BadBackupRequest", Voltype: "repo1", host: "repo-host", desiredRequest: "2Gi", desiredRequestBackup: "bar", expectedValue: "2Gi", - expectedNumEvents: 1, expectedEvent: "repo1 volume expansion to 2Gi requested for rhino/repo-host.", + expectedNumEvents: 1, expectedEvent: "repo1 volume expansion to 2Gi requested for rhino/repo-host.", expectedEventType: corev1.EventTypeNormal, expectedNumLogs: 1, expectedLog: "Unable to parse repo1 volume request from status backup (bar) for rhino/repo-host", }, { tcName: "Repo-ValueUpdateWithEvent", Voltype: "repo1", host: "repo-host", desiredRequest: "1Gi", desiredRequestBackup: "", expectedValue: "1Gi", - expectedNumEvents: 1, expectedEvent: "repo1 volume expansion to 1Gi requested for rhino/repo-host.", + expectedNumEvents: 1, expectedEvent: "repo1 volume expansion to 1Gi requested for rhino/repo-host.", expectedEventType: corev1.EventTypeNormal, expectedNumLogs: 0, }} @@ -220,6 +221,7 @@ func TestStoreDesiredRequest(t *testing.T) { assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeAutoGrow") assert.Equal(t, recorder.Events[0].Note, tc.expectedEvent) + assert.Equal(t, recorder.Events[0].Type, tc.expectedEventType) } assert.Equal(t, len(*logs), tc.expectedNumLogs) if tc.expectedNumLogs == 1 { @@ -430,6 +432,7 @@ resources: assert.Equal(t, len(recorder.Events), 1) assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeRequestOverLimit") + assert.Equal(t, recorder.Events[0].Type, corev1.EventTypeWarning) assert.Equal(t, recorder.Events[0].Note, "pgData volume request (4Gi) for elephant/some-instance is greater than set limit (3Gi). 
Limit value will be used.") }) @@ -599,6 +602,7 @@ resources: assert.Equal(t, len(recorder.Events), 1) assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeLimitReached") + assert.Equal(t, recorder.Events[0].Type, corev1.EventTypeNormal) assert.Equal(t, recorder.Events[0].Note, "pgData volume(s) for elephant/some-instance are at size limit (2Gi).") }) @@ -629,11 +633,13 @@ resources: if event.Reason == "VolumeLimitReached" { found1 = true assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Type, corev1.EventTypeNormal) assert.Equal(t, event.Note, "pgData volume(s) for elephant/some-instance are at size limit (5Gi).") } if event.Reason == "DesiredVolumeAboveLimit" { found2 = true assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Type, corev1.EventTypeWarning) assert.Equal(t, event.Note, "The desired size (10Gi) for the elephant/some-instance pgData volume(s) is greater than the size limit (5Gi).") } @@ -675,6 +681,7 @@ resources: assert.Equal(t, len(recorder.Events), 1) assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeLimitReached") + assert.Equal(t, recorder.Events[0].Type, corev1.EventTypeNormal) assert.Equal(t, recorder.Events[0].Note, "repo1 volume(s) for elephant/repo-host are at size limit (2Gi).") }) @@ -707,6 +714,7 @@ resources: assert.Equal(t, len(recorder.Events), 1) assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeLimitReached") + assert.Equal(t, recorder.Events[0].Type, corev1.EventTypeWarning) assert.Equal(t, recorder.Events[0].Note, "pgWAL volume(s) for elephant/another-instance are at size limit (3Gi).") }) From a3007aa1af95ea9109780e578f71c3f93b4987b1 Mon Sep 17 00:00:00 2001 From: jmckulk Date: Tue, 16 Sep 2025 15:40:41 -0400 Subject: [PATCH 04/43] Preserve path when clearing environment Unsetting PATH could lead to issues in some environments (nix) where tools (mktemp) aren't in a standard location. Keep path to avoid issues --- internal/postgres/config_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index cd4962be7..53bcfc1bf 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -256,7 +256,10 @@ func TestBashRecreateDirectory(t *testing.T) { filepath.Join(dir, "d"), "0740") // The assertion below expects alphabetically sorted filenames. // Set an empty environment to always use the default/standard locale. - cmd.Env = []string{} + cmd.Env = []string{ + // Preserve the path to find bash tools (i.e., mktemp) + "PATH=" + os.Getenv("PATH"), + } output, err := cmd.CombinedOutput() assert.NilError(t, err, string(output)) assert.Assert(t, cmp.Regexp(`^`+ From 02af3ef5a63ff2381f665cc5187e079eff2d2680 Mon Sep 17 00:00:00 2001 From: TJ Moore Date: Wed, 10 Sep 2025 17:55:43 -0400 Subject: [PATCH 05/43] Update repo volume status handling This update ensures that the repo volume status is preserved in cases where there is an error when applying the repo volume. When there is an error, the status is now preserved until the underlying issue is corrected. 
Issue: PGO-2654
---
 .../controller/postgrescluster/pgbackrest.go | 26 ++++++++++++-------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go
index d31350fd4..f4d43d827 100644
--- a/internal/controller/postgrescluster/pgbackrest.go
+++ b/internal/controller/postgrescluster/pgbackrest.go
@@ -2743,7 +2743,7 @@ func (r *Reconciler) reconcileRepos(ctx context.Context,
 	errors := []error{}
 	errMsg := "reconciling repository volume"
-	repoVols := []*corev1.PersistentVolumeClaim{}
+	repoVols := make(map[string]*corev1.PersistentVolumeClaim)
 	var replicaCreateRepo v1beta1.PGBackRestRepo
 	if feature.Enabled(ctx, feature.AutoGrowVolumes) && pgbackrest.RepoHostVolumeDefined(postgresCluster) {
@@ -2770,16 +2770,15 @@
 		// value to change later.
 		spec.Resources.Limits = nil
 
-		repo, err := r.applyRepoVolumeIntent(ctx, postgresCluster, spec,
+		repoPVC, err := r.applyRepoVolumeIntent(ctx, postgresCluster, spec,
 			repo.Name, repoResources)
 		if err != nil {
 			log.Error(err, errMsg)
 			errors = append(errors, err)
-			continue
-		}
-		if repo != nil {
-			repoVols = append(repoVols, repo)
 		}
+		// Store the repo volume after apply. If nil, that indicates a problem
+		// and the existing status should be preserved.
+		repoVols[repo.Name] = repoPVC
 	}
 
 	postgresCluster.Status.PGBackRest.Repos =
@@ -2977,7 +2976,7 @@ func getRepoHostStatus(repoHost *appsv1.StatefulSet) *v1beta1.RepoHostStatus {
 // existing/current status for any repos in the cluster, the repository volumes
 // (i.e. PVCs) reconciled for the cluster, and the hashes calculated for the configuration for any
 // external repositories defined for the cluster.
-func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes []*corev1.PersistentVolumeClaim,
+func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes map[string]*corev1.PersistentVolumeClaim,
 	configHashes map[string]string, replicaCreateRepoName string) []v1beta1.RepoStatus {
 
 	// the new repository status that will be generated and returned
@@ -2985,11 +2984,18 @@
 	// Update the repo status based on the repo volumes (PVCs) that were reconciled. This includes
 	// updating the status for any existing repository volumes, and adding status for any new
-	// repository volumes.
-	for _, rv := range repoVolumes {
+	// repository volumes. If there was a problem with the volume when an apply was attempted,
+	// the existing status is preserved.
+	for repoName, rv := range repoVolumes {
 		newRepoVolStatus := true
-		repoName := rv.Labels[naming.LabelPGBackRestRepo]
 		for _, rs := range repoStatus {
+			// Preserve the previous status if it exists and the apply failed.
+			if rs.Name == repoName && rv == nil {
+				updatedRepoStatus = append(updatedRepoStatus, rs)
+				newRepoVolStatus = false
+				break
+			}
+
 			// treat as new status if it contains properties of a cloud (s3, gcs, or azure) repo
 			if rs.Name == repoName && rs.RepoOptionsHash == "" {
 				newRepoVolStatus = false

From ed158595e92b3a014258ec66e1516aa198213012 Mon Sep 17 00:00:00 2001
From: TJ Moore
Date: Thu, 11 Sep 2025 11:53:37 -0400
Subject: [PATCH 06/43] Volume auto-grow naming updates

Certain variable names make the behavior of the volume auto-grow code
unclear. This renames those variables and adds a comment to better
indicate the behavior in question.
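The comment in question documents the by-reference update: getDesiredVolumeSize grows the caller's quantity in place and never shrinks it. A standalone sketch of that pattern, for illustration only (keepLarger is a hypothetical name):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    // keepLarger parses candidate and writes it back through the pointer only
    // when it is larger: the grow-only, by-reference pattern used by
    // getDesiredVolumeSize.
    func keepLarger(request *resource.Quantity, candidate string) error {
        parsed, err := resource.ParseQuantity(candidate)
        if err != nil {
            return err
        }
        if parsed.Value() > request.Value() {
            *request = parsed
        }
        return nil
    }

    func main() {
        size := resource.MustParse("1Gi")
        _ = keepLarger(&size, "2Gi") // grows to 2Gi
        _ = keepLarger(&size, "1Gi") // smaller: ignored
        fmt.Println(size.String())   // 2Gi
    }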
--- .../controller/postgrescluster/autogrow.go | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/internal/controller/postgrescluster/autogrow.go b/internal/controller/postgrescluster/autogrow.go index f06cd78eb..e96d69f19 100644 --- a/internal/controller/postgrescluster/autogrow.go +++ b/internal/controller/postgrescluster/autogrow.go @@ -156,12 +156,13 @@ func (r *Reconciler) setVolumeSize(ctx context.Context, cluster *v1beta1.Postgre // Otherwise, if the feature gate is not enabled, do not autogrow. } else if feature.Enabled(ctx, feature.AutoGrowVolumes) { - // determine the appropriate volume request based on what's set in the status - if dpv, err := getDesiredVolumeSize( + // Determine the appropriate volume request based on what's set in the status. + // Note: request size set by reference. + if badDesiredVolumeRequest, err := getDesiredVolumeSize( cluster, volumeType, host, volumeRequestSize, ); err != nil { log.Error(err, "For "+cluster.Name+"/"+host+ - ": Unable to parse "+volumeType+" volume request: "+dpv) + ": Unable to parse "+volumeType+" volume request: "+badDesiredVolumeRequest) } // If the volume request size is greater than or equal to the limit and the @@ -203,15 +204,15 @@ func getDesiredVolumeSize(cluster *v1beta1.PostgresCluster, case volumeType == "pgData": for i := range cluster.Status.InstanceSets { if instanceSpecName == cluster.Status.InstanceSets[i].Name { - for _, dpv := range cluster.Status.InstanceSets[i].DesiredPGDataVolume { - if dpv != "" { - desiredRequest, err := resource.ParseQuantity(dpv) + for _, desiredRequestString := range cluster.Status.InstanceSets[i].DesiredPGDataVolume { + if desiredRequestString != "" { + desiredRequest, err := resource.ParseQuantity(desiredRequestString) if err == nil { if desiredRequest.Value() > volumeRequestSize.Value() { *volumeRequestSize = desiredRequest } } else { - return dpv, err + return desiredRequestString, err } } } @@ -221,15 +222,15 @@ func getDesiredVolumeSize(cluster *v1beta1.PostgresCluster, case volumeType == "pgWAL": for i := range cluster.Status.InstanceSets { if instanceSpecName == cluster.Status.InstanceSets[i].Name { - for _, dpv := range cluster.Status.InstanceSets[i].DesiredPGWALVolume { - if dpv != "" { - desiredRequest, err := resource.ParseQuantity(dpv) + for _, desiredRequestString := range cluster.Status.InstanceSets[i].DesiredPGWALVolume { + if desiredRequestString != "" { + desiredRequest, err := resource.ParseQuantity(desiredRequestString) if err == nil { if desiredRequest.Value() > volumeRequestSize.Value() { *volumeRequestSize = desiredRequest } } else { - return dpv, err + return desiredRequestString, err } } } @@ -245,15 +246,15 @@ func getDesiredVolumeSize(cluster *v1beta1.PostgresCluster, } for i := range cluster.Status.PGBackRest.Repos { if volumeType == cluster.Status.PGBackRest.Repos[i].Name { - dpv := cluster.Status.PGBackRest.Repos[i].DesiredRepoVolume - if dpv != "" { - desiredRequest, err := resource.ParseQuantity(dpv) + desiredRequestString := cluster.Status.PGBackRest.Repos[i].DesiredRepoVolume + if desiredRequestString != "" { + desiredRequest, err := resource.ParseQuantity(desiredRequestString) if err == nil { if desiredRequest.Value() > volumeRequestSize.Value() { *volumeRequestSize = desiredRequest } } else { - return dpv, err + return desiredRequestString, err } } } From cc715919ed94f1de150d14519995b623f4a12746 Mon Sep 17 00:00:00 2001 From: Drew Sessler Date: Wed, 17 Sep 2025 12:50:15 -0700 Subject: [PATCH 07/43] Adjust cloud backup 
volume behavior such that additional volumes take precedence when there
 is a naming collision with the pgbackrest-cloud-log-volume annotation.
 If the annotation is present and no log path is given via the spec, use
 the log path that the annotation logic yields. Add/adjust tests
 appropriately.
---
 .../controller/postgrescluster/pgbackrest.go  |  41 ++++-
 .../postgrescluster/pgbackrest_test.go        | 314 +++++++++++++++++-
 2 files changed, 342 insertions(+), 13 deletions(-)

diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go
index f4d43d827..b4067a83f 100644
--- a/internal/controller/postgrescluster/pgbackrest.go
+++ b/internal/controller/postgrescluster/pgbackrest.go
@@ -892,9 +892,29 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl
 	jobSpec.Template.Spec.SecurityContext = postgres.PodSecurityContext(postgresCluster)
 	pgbackrest.AddConfigToCloudBackupJob(postgresCluster, &jobSpec.Template)
 
-	// Mount the PVC named in the "pgbackrest-cloud-log-volume" annotation, if any.
+	// If the "pgbackrest-cloud-log-volume" annotation has a value, check if it is the
+	// same as any of the additional volume names. If there is a collision of names,
+	// create a warning event. If there is no name collision, mount the volume referenced
+	// by the annotation.
 	if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" {
-		util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolume)
+		var collisionFound bool
+		if jobs != nil && jobs.Volumes != nil {
+			for _, volume := range jobs.Volumes.Additional {
+				if volume.Name == logVolume {
+					collisionFound = true
+					r.Recorder.Event(postgresCluster, corev1.EventTypeWarning,
+						"DuplicateCloudBackupVolume", "The volume name specified in the "+
+							"pgbackrest-cloud-log-volume annotation is the same as one "+
+							"specified in spec.backups.pgbackrest.jobs.volumes.additional. "+
+							"Cannot mount duplicate volume names. Defaulting to the "+
+							"additional volume.")
+					break
+				}
+			}
+		}
+		if !collisionFound {
+			util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolume)
+		}
 	}
 }
 
@@ -3346,21 +3366,22 @@ func authorizeBackupRemovalAnnotationPresent(postgresCluster *v1beta1.PostgresCl
 }
 
 // getCloudLogPath is responsible for determining the appropriate log path for pgbackrest
-// in cloud backup jobs. If the user has specified a PVC to use as a log volume for cloud
-// backups via the PGBackRestCloudLogVolume annotation, set the cloud log path accordingly.
-// If the user has not set the PGBackRestCloudLogVolume annotation, but has set a log path
-// via the spec, use that.
-// TODO: Make sure this is what we want (i.e. annotation to take precedence over spec)
+// in cloud backup jobs. If the user specified a log path via the spec, use it. Otherwise,
+// if the user specified a log volume for cloud backups via the PGBackRestCloudLogVolume
+// annotation, use that. If neither is set, return an empty string.
 //
 // This function assumes that the backups/pgbackrest spec is present in postgresCluster.
func getCloudLogPath(postgresCluster *v1beta1.PostgresCluster) string { cloudLogPath := "" - if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" { - cloudLogPath = "/volumes/" + logVolume - } else if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil && + if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil && postgresCluster.Spec.Backups.PGBackRest.Jobs.Log != nil && postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path != "" { + // TODO: I know it should be caught by CEL validation, but is it worthwhile to also + // check that Log.Path ~= "/volumes/" + existingAdditionalVolume.name here?? + cloudLogPath = filepath.Clean(postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path) + } else if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" { + cloudLogPath = "/volumes/" + logVolume } return cloudLogPath } diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index b4e590411..715fcb2d4 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -3130,6 +3130,312 @@ volumes: // No events created assert.Equal(t, len(recorder.Events), 0) }) + + t.Run("AdditionalVolumesMissingContainers", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + r.Recorder = recorder + + cluster := cluster.DeepCopy() + cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/stuff/log", + }, + Volumes: &v1beta1.PGBackRestVolumesSpec{ + Additional: []v1beta1.AdditionalVolume{ + { + ClaimName: "additional-pvc", + Containers: []v1beta1.DNS1123Label{ + "pgbackrest", + "non-existent-container", + }, + Name: "stuff", + }, + }, + }, + } + + spec := r.generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - sh + - -c + - -- + - mkdir -p '/volumes/stuff/log' && { chmod 0775 '/volumes/stuff/log' || :; }; exec + "$@" + - -- + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/stuff + name: volumes-stuff +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp +- name: volumes-stuff + persistentVolumeClaim: + claimName: additional-pvc`)) + + // Missing containers warning event created + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) 
+ assert.Equal(t, recorder.Events[0].Reason, "SpecifiedContainerNotFound") + assert.Equal(t, recorder.Events[0].Note, "The following Backup Job Pod "+ + "containers were specified for additional volumes but cannot be "+ + "found: [non-existent-container].") + }) + + t.Run("AnnotationAndAdditionalVolumeWithPath", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + r.Recorder = recorder + + cluster := cluster.DeepCopy() + cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} + cluster.Annotations[naming.PGBackRestCloudLogVolume] = "stuff" + + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/stuff/log", + }, + Volumes: &v1beta1.PGBackRestVolumesSpec{ + Additional: []v1beta1.AdditionalVolume{ + { + ClaimName: "additional-pvc", + Name: "stuff", + }, + }, + }, + } + + spec := r.generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - sh + - -c + - -- + - mkdir -p '/volumes/stuff/log' && { chmod 0775 '/volumes/stuff/log' || :; }; exec + "$@" + - -- + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/stuff + name: volumes-stuff +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp +- name: volumes-stuff + persistentVolumeClaim: + claimName: additional-pvc`)) + + // Annotation/additional volume collision warning event created + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "DuplicateCloudBackupVolume") + assert.Equal(t, recorder.Events[0].Note, "The volume name specified in "+ + "the pgbackrest-cloud-log-volume annotation is the same as one "+ + "specified in spec.backups.pgbackrest.jobs.volumes.additional. Cannot "+ + "mount duplicate volume names. 
Defaulting to the additional volume.") + }) + + t.Run("AnnotationAndAdditionalVolumeNoPath", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + r.Recorder = recorder + + cluster := cluster.DeepCopy() + cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} + cluster.Annotations[naming.PGBackRestCloudLogVolume] = "stuff" + + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Volumes: &v1beta1.PGBackRestVolumesSpec{ + Additional: []v1beta1.AdditionalVolume{ + { + ClaimName: "additional-pvc", + Name: "stuff", + }, + }, + }, + } + + spec := r.generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - sh + - -c + - -- + - mkdir -p '/volumes/stuff' && { chmod 0775 '/volumes/stuff' || :; }; exec "$@" + - -- + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/stuff + name: volumes-stuff +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp +- name: volumes-stuff + persistentVolumeClaim: + claimName: additional-pvc`)) + + // Annotation/additional volume collision warning event created + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "DuplicateCloudBackupVolume") + assert.Equal(t, recorder.Events[0].Note, "The volume name specified in "+ + "the pgbackrest-cloud-log-volume annotation is the same as one "+ + "specified in spec.backups.pgbackrest.jobs.volumes.additional. Cannot "+ + "mount duplicate volume names. 
Defaulting to the additional volume.") + }) } func TestGenerateRepoHostIntent(t *testing.T) { @@ -4599,8 +4905,10 @@ func TestGetCloudLogPath(t *testing.T) { Spec: v1beta1.PostgresClusterSpec{ Backups: v1beta1.Backups{ PGBackRest: v1beta1.PGBackRestArchive{ - Log: &v1beta1.LoggingConfiguration{ - Path: "/volumes/test/log", + Jobs: &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test/log/", + }, }, }, }, @@ -4608,6 +4916,6 @@ func TestGetCloudLogPath(t *testing.T) { } postgrescluster.Annotations = map[string]string{} postgrescluster.Annotations[naming.PGBackRestCloudLogVolume] = "another-pvc" - assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/another-pvc") + assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/test/log") }) } From a2c529bad459ac92d406c33a09cdef125bb19c00 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Sep 2025 17:49:55 +0000 Subject: [PATCH 08/43] Bump the opentelemetry group with 6 updates Bumps the opentelemetry group with 6 updates: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/contrib/exporters/autoexport](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.57.0` | `0.62.0` | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.57.0` | `0.62.0` | | [go.opentelemetry.io/contrib/propagators/autoprop](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.57.0` | `0.62.0` | | [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) | `1.32.0` | `1.37.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.32.0` | `1.37.0` | | [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) | `1.32.0` | `1.37.0` | Updates `go.opentelemetry.io/contrib/exporters/autoexport` from 0.57.0 to 0.62.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.57.0...zpages/v0.62.0) Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.57.0 to 0.62.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.57.0...zpages/v0.62.0) Updates `go.opentelemetry.io/contrib/propagators/autoprop` from 0.57.0 to 0.62.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.57.0...zpages/v0.62.0) Updates `go.opentelemetry.io/otel` from 1.32.0 to 1.37.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - 
[Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...v1.37.0) Updates `go.opentelemetry.io/otel/sdk` from 1.32.0 to 1.37.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...v1.37.0) Updates `go.opentelemetry.io/otel/trace` from 1.32.0 to 1.37.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...v1.37.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/exporters/autoexport dependency-version: 0.62.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: opentelemetry - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp dependency-version: 0.62.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: opentelemetry - dependency-name: go.opentelemetry.io/contrib/propagators/autoprop dependency-version: 0.62.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: opentelemetry - dependency-name: go.opentelemetry.io/otel dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: opentelemetry - dependency-name: go.opentelemetry.io/otel/sdk dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: opentelemetry - dependency-name: go.opentelemetry.io/otel/trace dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: opentelemetry ... 
Signed-off-by: dependabot[bot] --- go.mod | 80 +++++++++++++-------------- go.sum | 168 ++++++++++++++++++++++++++++++--------------------------- 2 files changed, 129 insertions(+), 119 deletions(-) diff --git a/go.mod b/go.mod index 287be117f..0ec845635 100644 --- a/go.mod +++ b/go.mod @@ -16,12 +16,12 @@ require ( github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/xdg-go/stringprep v1.0.4 - go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 - go.opentelemetry.io/contrib/propagators/autoprop v0.57.0 - go.opentelemetry.io/otel v1.33.0 - go.opentelemetry.io/otel/sdk v1.33.0 - go.opentelemetry.io/otel/trace v1.33.0 + go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 + go.opentelemetry.io/contrib/propagators/autoprop v0.63.0 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 golang.org/x/crypto v0.41.0 golang.org/x/tools v0.36.0 gotest.tools/v3 v3.5.2 @@ -37,12 +37,12 @@ require ( ) require ( - cel.dev/expr v0.19.1 // indirect + cel.dev/expr v0.24.0 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect @@ -63,7 +63,8 @@ require ( github.com/google/gnostic-models v0.6.9 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -76,36 +77,37 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/prometheus/client_golang v1.22.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_golang v1.23.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/otlptranslator v0.0.2 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/spf13/cobra v1.9.1 // indirect github.com/spf13/pflag v1.0.6 
// indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect - go.opentelemetry.io/contrib/propagators/aws v1.32.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.32.0 // indirect - go.opentelemetry.io/contrib/propagators/jaeger v1.32.0 // indirect - go.opentelemetry.io/contrib/propagators/ot v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect - go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.33.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 // indirect + go.opentelemetry.io/contrib/propagators/aws v1.38.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.38.0 // indirect + go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 // indirect + go.opentelemetry.io/contrib/propagators/ot v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect + go.opentelemetry.io/otel/log v0.14.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect @@ -113,7 +115,7 @@ require ( golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/mod v0.27.0 // indirect golang.org/x/net v0.43.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect 
golang.org/x/sys v0.35.0 // indirect golang.org/x/term v0.34.0 // indirect @@ -121,10 +123,10 @@ require ( golang.org/x/time v0.9.0 // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/grpc v1.68.1 // indirect - google.golang.org/protobuf v1.36.7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index d60a6185f..757aed03f 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= -cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= @@ -10,8 +10,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -76,8 +76,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/grafana/regexp 
v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/itchyny/gojq v0.12.17 h1:8av8eGduDb5+rvEdaOO+zQUjA04MS0m3Ps8HiD+fceg= @@ -134,14 +136,16 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ= +github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -163,8 +167,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ 
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= @@ -174,60 +178,62 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/contrib/propagators/autoprop v0.57.0 h1:bNPJOdT5154XxzeFmrh8R+PXnV4t3TZEczy8gHEpcpg= -go.opentelemetry.io/contrib/propagators/autoprop v0.57.0/go.mod h1:Tb0j0mK+QatKdCxCKPN7CSzc7kx/q34/KaohJx/N96s= -go.opentelemetry.io/contrib/propagators/aws v1.32.0 h1:NELzr8bW7a7aHVZj5gaep1PfkvoSCGx+1qNGZx/uhhU= -go.opentelemetry.io/contrib/propagators/aws v1.32.0/go.mod h1:XKMrzHNka3eOA+nGEcNKYVL9s77TAhkwQEynYuaRFnQ= -go.opentelemetry.io/contrib/propagators/b3 v1.32.0 h1:MazJBz2Zf6HTN/nK/s3Ru1qme+VhWU5hm83QxEP+dvw= -go.opentelemetry.io/contrib/propagators/b3 v1.32.0/go.mod h1:B0s70QHYPrJwPOwD1o3V/R8vETNOG9N3qZf4LDYvA30= -go.opentelemetry.io/contrib/propagators/jaeger v1.32.0 h1:K/fOyTMD6GELKTIJBaJ9k3ppF2Njt8MeUGBOwfaWXXA= -go.opentelemetry.io/contrib/propagators/jaeger v1.32.0/go.mod h1:ISE6hda//MTWvtngG7p4et3OCngsrTVfl7c6DjN17f8= -go.opentelemetry.io/contrib/propagators/ot v1.32.0 h1:Poy02A4wOZubHyd2hpHPDgZW+rn6EIq0vCwTZJ6Lmu8= -go.opentelemetry.io/contrib/propagators/ot v1.32.0/go.mod h1:cbhaURV+VR3NIMarzDYZU1RDEkXG1fNd1WMP1XCcGkY= -go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= -go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= 
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= -go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= -go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= -go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= -go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= -go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= -go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= -go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= -go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= 
-go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 h1:/Rij/t18Y7rUayNg7Id6rPrEnHgorxYabm2E6wUdPP4= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0/go.mod h1:AdyDPn6pkbkt2w01n3BubRVk7xAsCRq1Yg1mpfyA/0E= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 h1:NLnZybb9KkfMXPwZhd5diBYJoVxiO9Qa06dacEA7ySY= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0/go.mod h1:OvRg7gm5WRSCtxzGSsrFHbDLToYlStHNZQ+iPNIyD6g= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/contrib/propagators/autoprop v0.63.0 h1:S3+4UwR3Y1tUKklruMwOacAFInNvtuOexz4ZTmJNAyw= +go.opentelemetry.io/contrib/propagators/autoprop v0.63.0/go.mod h1:qpIuOggbbw2T9nKRaO1je/oTRKd4zslAcJonN8LYbTg= +go.opentelemetry.io/contrib/propagators/aws v1.38.0 h1:eRZ7asSbLc5dH7+TBzL6hFKb1dabz0IV51uUUwYRZts= +go.opentelemetry.io/contrib/propagators/aws v1.38.0/go.mod h1:wXqc9NTGcXapBExHBDVLEZlByu6quiQL8w7Tjgv8TCg= +go.opentelemetry.io/contrib/propagators/b3 v1.38.0 h1:uHsCCOSKl0kLrV2dLkFK+8Ywk9iKa/fptkytc6aFFEo= +go.opentelemetry.io/contrib/propagators/b3 v1.38.0/go.mod h1:wMRSZJZcY8ya9mApLLhwIMjqmApy2o/Ml+62lhvxyHU= +go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 h1:nXGeLvT1QtCAhkASkP/ksjkTKZALIaQBIW+JSIw1KIc= +go.opentelemetry.io/contrib/propagators/jaeger v1.38.0/go.mod h1:oMvOXk78ZR3KEuPMBgp/ThAMDy9ku/eyUVztr+3G6Wo= +go.opentelemetry.io/contrib/propagators/ot v1.38.0 h1:k4gSyyohaDXI8F9BDXYC3uO2vr5sRNeQFMsN9Zn0EoI= +go.opentelemetry.io/contrib/propagators/ot v1.38.0/go.mod h1:2hDsuiHRO39SRUMhYGqmj64z/IuMRoxE4bBSFR82Lo8= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 h1:OMqPldHt79PqWKOMYIAQs3CxAi7RLgPxwfFSwr4ZxtM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0/go.mod h1:1biG4qiqTxKiUCtoWDPpL3fB3KxVwCiGw81j3nKMuHE= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 h1:QQqYw3lkrzwVsoEX0w//EhH/TCnpRdEenKBOOEIMjWc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0/go.mod h1:gSVQcr17jk2ig4jqJ2DX30IdWH251JcNAecvrqTxH1s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0/go.mod h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 
h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 h1:B/g+qde6Mkzxbry5ZZag0l7QrQBCtVm7lVjaLgmpje8= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0/go.mod h1:mOJK8eMmgW6ocDJn6Bn11CcZ05gi3P8GylBXEkZtbgA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= +go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= +go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg= +go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -261,8 +267,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -310,16 +316,18 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= -google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From bf6ca905d1f9165261407cec47353bef3dba2859 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 17 Sep 2025 23:23:54 -0500 Subject: [PATCH 09/43] Automatically set an exact maxLength on enum fields in CRDs This automates what a CRD author would do anytime they add or change an enum field. 
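To make the effect concrete: the post-processing step reads each generated enum and adds a maxLength equal to the length of its longest value. A minimal before/after sketch of one such field, imagePullPolicy, with the surrounding schema abbreviated here:

    # before post-processing
    imagePullPolicy:
      enum:
      - Always
      - Never
      - IfNotPresent
      type: string

    # after: maxLength is len("IfNotPresent") == 12, the longest value
    imagePullPolicy:
      enum:
      - Always
      - Never
      - IfNotPresent
      maxLength: 12
      type: string
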
--- ...crunchydata.com_crunchybridgeclusters.yaml | 3 +- ...res-operator.crunchydata.com_pgadmins.yaml | 5 +- ...s-operator.crunchydata.com_pgupgrades.yaml | 5 +- ...ator.crunchydata.com_postgresclusters.yaml | 82 +++++++++++-------- internal/crd/post-process.jq | 12 ++- .../v1/postgrescluster_types.go | 5 -- .../v1beta1/crunchy_bridgecluster_types.go | 4 - .../v1beta1/patroni_types.go | 8 -- .../v1beta1/pgupgrade_types.go | 9 -- .../v1beta1/postgres_types.go | 4 - .../v1beta1/postgrescluster_types.go | 5 -- .../v1beta1/shared_types.go | 13 --- .../v1beta1/standalone_pgadmin_types.go | 9 -- 13 files changed, 67 insertions(+), 97 deletions(-) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml index a49b7a52e..6add75dad 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -90,7 +90,7 @@ spec: - aws - azure - gcp - maxLength: 10 + maxLength: 5 type: string x-kubernetes-validations: - message: immutable @@ -199,6 +199,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index 313fa590a..c729da25e 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -1610,7 +1610,7 @@ spec: - Always - Never - IfNotPresent - maxLength: 15 + maxLength: 12 type: string imagePullSecrets: description: |- @@ -2579,7 +2579,7 @@ spec: enum: - Administrator - User - maxLength: 15 + maxLength: 13 type: string username: description: |- @@ -2694,6 +2694,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml index 2476377b2..240853746 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -971,7 +971,7 @@ spec: - Always - Never - IfNotPresent - maxLength: 15 + maxLength: 12 type: string imagePullSecrets: description: |- @@ -1133,7 +1133,7 @@ spec: - Copy - CopyFileRange - Link - maxLength: 15 + maxLength: 13 type: string required: - fromPostgresVersion @@ -1195,6 +1195,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. 
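Every limit in these generated manifests follows the same rule, the length of the longest allowed value. For example:

    "Unknown"          -> maxLength: 7
    "IfNotPresent"     -> maxLength: 12
    "CopyFileRange"    -> maxLength: 13
    "RequireDualStack" -> maxLength: 16
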
diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 5a250c935..8556b11d2 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -8074,7 +8074,7 @@ spec: - Always - Never - IfNotPresent - maxLength: 15 + maxLength: 12 type: string imagePullSecrets: description: |- @@ -12956,7 +12956,7 @@ spec: - INFO - DEBUG - NOTSET - maxLength: 10 + maxLength: 8 type: string storageLimit: description: |- @@ -13001,7 +13001,7 @@ spec: enum: - Switchover - Failover - maxLength: 15 + maxLength: 10 type: string required: - enabled @@ -15915,14 +15915,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -15932,6 +15932,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -15940,6 +15941,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -15968,7 +15970,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object sidecars: @@ -16322,14 +16324,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -16339,6 +16341,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -16347,6 +16350,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -16375,7 +16379,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object service: @@ -16387,14 +16391,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -16404,6 +16408,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -16412,6 +16417,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -16440,7 +16446,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object shutdown: @@ -18086,14 +18092,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -18103,6 +18109,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -18111,6 +18118,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for 
custom resources @@ -18139,7 +18147,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object tolerations: @@ -18422,7 +18430,7 @@ spec: enum: - ASCII - AlphaNumeric - maxLength: 15 + maxLength: 12 type: string required: - type @@ -18511,6 +18519,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. @@ -26898,7 +26907,7 @@ spec: - Always - Never - IfNotPresent - maxLength: 15 + maxLength: 12 type: string imagePullSecrets: description: |- @@ -31779,7 +31788,7 @@ spec: - INFO - DEBUG - NOTSET - maxLength: 10 + maxLength: 8 type: string storageLimit: description: |- @@ -31824,7 +31833,7 @@ spec: enum: - Switchover - Failover - maxLength: 15 + maxLength: 10 type: string required: - enabled @@ -34738,14 +34747,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -34755,6 +34764,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -34763,6 +34773,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -34791,7 +34802,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object sidecars: @@ -35139,14 +35150,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -35156,6 +35167,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -35164,6 +35176,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -35192,7 +35205,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object service: @@ -35204,14 +35217,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -35221,6 +35234,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -35229,6 +35243,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -35257,7 +35272,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object shutdown: @@ -36903,14 +36918,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -36920,6 +36935,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -36928,6 +36944,7 @@ spec: - SingleStack - PreferDualStack - 
RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -36956,7 +36973,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object tolerations: @@ -37236,7 +37253,7 @@ spec: enum: - ASCII - AlphaNumeric - maxLength: 15 + maxLength: 12 type: string required: - type @@ -37302,6 +37319,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. diff --git a/internal/crd/post-process.jq b/internal/crd/post-process.jq index 935ab09a8..41b2faa29 100644 --- a/internal/crd/post-process.jq +++ b/internal/crd/post-process.jq @@ -10,14 +10,20 @@ # https://jqlang.org/manual#multiplication-division-modulo def merge(stream): reduce stream as $i ({}; . * $i); +# Kubernetes assumes the evaluation cost of an enum value is very large: https://issue.k8s.io/119511 +# Look at every schema that has a populated "enum" property. +reduce paths(try .enum | length > 0) as $path (.; + getpath($path) as $schema | + setpath($path; $schema + { maxLength: ($schema.enum | map(length) | max) }) +) | + # Kubernetes does not consider "allOf" when estimating CEL cost: https://issue.k8s.io/134029 # controller-gen might produce "allOf" when combining markers: # https://github.com/kubernetes-sigs/controller-tools/issues/1270 # # This (partially) addresses both by keeping only the smallest max, largest min, etc. -# -# Look at every schema that has an "allOf" property. -reduce paths(try .allOf) as $path (.; +# Look at every schema that has a populated "allOf" property. +reduce paths(try .allOf | length > 0) as $path (.; ( getpath($path) | merge( ., diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go index d2f38441d..36177d1e4 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go @@ -94,11 +94,6 @@ type PostgresClusterSpec struct { // pull (download) container images. // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // +kubebuilder:validation:Type=string - // // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} // +optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go index 89b464a24..c8606d6e8 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go @@ -52,10 +52,6 @@ type CrunchyBridgeClusterSpec struct { // The cloud provider where the cluster is located. // Currently Bridge offers aws, azure, and gcp only // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. 
- // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=10 - // // +kubebuilder:validation:Required // +kubebuilder:validation:Enum={aws,azure,gcp} // +kubebuilder:validation:XValidation:rule=`self == oldSelf`,message="immutable" diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go index 5ab1b2792..3d4d9bda5 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go @@ -68,10 +68,6 @@ type PatroniLogConfig struct { // The Patroni log level. // More info: https://docs.python.org/3/library/logging.html#levels // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=10 - // // +default="INFO" // +kubebuilder:validation:Enum={CRITICAL,ERROR,WARNING,INFO,DEBUG,NOTSET} // +optional @@ -96,10 +92,6 @@ type PatroniSwitchover struct { // factors. A TargetInstance must be specified to failover. // NOTE: The Failover type is reserved as the "last resort" case. // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +kubebuilder:validation:Enum={Switchover,Failover} // +kubebuilder:default:=Switchover // +optional diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go index 805ce1a16..a7f40dc83 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go @@ -29,11 +29,6 @@ type PGUpgradeSpec struct { // pull (download) container images. // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // +kubebuilder:validation:Type=string - // // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} // +optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` @@ -110,10 +105,6 @@ type PGUpgradeSettings struct { // - Clone since 12: https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/pg_upgrade/pg_upgrade.h;hb=REL_12_0#l232 // - CopyFileRange since 17: https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/pg_upgrade/pg_upgrade.h;hb=REL_17_0#l251 // - // Kubernetes assumes the evaluation cost of an enum value is very large. 
- // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +kubebuilder:validation:Enum={Clone,Copy,CopyFileRange,Link} // +optional TransferMethod string `json:"transferMethod,omitempty"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go index 06658065b..2880c565e 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go @@ -175,10 +175,6 @@ type PostgresPasswordSpec struct { // "ASCII" passwords contain letters, numbers, and symbols from the US-ASCII character set. // "AlphaNumeric" passwords contain letters and numbers from the US-ASCII character set. // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +kubebuilder:default=ASCII // +kubebuilder:validation:Enum={ASCII,AlphaNumeric} // +required diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 525f772d9..60a65d323 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -81,11 +81,6 @@ type PostgresClusterSpec struct { // pull (download) container images. // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // +kubebuilder:validation:Type=string - // // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} // +optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index c6351ca86..48a192cb8 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -244,10 +244,6 @@ type ServiceSpec struct { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +optional // +kubebuilder:default=ClusterIP // +kubebuilder:validation:Enum={ClusterIP,NodePort,LoadBalancer} @@ -265,11 +261,6 @@ type ServiceSpec struct { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. 
- // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=10 - // +kubebuilder:validation:Type=string - // // +optional // +kubebuilder:validation:Enum={Cluster,Local} InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"` @@ -277,10 +268,6 @@ type ServiceSpec struct { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies // --- // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=10 - // +kubebuilder:validation:Type=string - // // +optional // +kubebuilder:validation:Enum={Cluster,Local} ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go index eacf54e36..4b88f1272 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go @@ -105,11 +105,6 @@ type PGAdminSpec struct { // pull (download) container images. // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // +kubebuilder:validation:Type=string - // // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} // +optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` @@ -207,10 +202,6 @@ type PGAdminUser struct { // Role determines whether the user has admin privileges or not. // Defaults to User. Valid options are Administrator and User. // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +kubebuilder:validation:Enum={Administrator,User} // +optional Role string `json:"role,omitempty"` From af97e79d21333085253f8d4ec17a917d955d6761 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Thu, 12 Dec 2024 13:35:19 -0600 Subject: [PATCH 10/43] Separate postgrescluster.Reconciler client concerns We want to be mindful of our interactions with the Kubernetes API, and these interfaces will help keep functions focused. These interfaces are also narrower than client.Reader and client.Writer and may help us keep RBAC markers accurate. A new constructor populates these fields with a single client.Client. The client.WithFieldOwner constructor allows us to drop our Owner field and patch method. This allows `make check` to cover 9% more of the "postgrescluster" package. 
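A minimal sketch of the narrow-interface idea, separate from this patch (the names `reader` and `getConfigMap` are illustrative only): a function that only reads declares exactly the methods it uses, and any client.Client, including one wrapped by client.WithFieldOwner to stamp a fieldManager on writes, still satisfies it.

    package example

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // reader is narrower than client.Reader: callers advertise that they
    // only ever Get, which keeps the required RBAC obvious.
    type reader interface {
        Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error
    }

    // getConfigMap compiles against any client.Client, for example the result
    // of client.WithFieldOwner(mgr.GetClient(), "controller-name").
    func getConfigMap(ctx context.Context, r reader, key client.ObjectKey) (*corev1.ConfigMap, error) {
        cm := &corev1.ConfigMap{}
        err := r.Get(ctx, key, cm)
        return cm, err
    }

Because the Reconciler fields below are declared the same way, tests can substitute any value with matching methods rather than a full client.Client.
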
--- cmd/postgres-operator/main.go | 19 +- internal/controller/postgrescluster/apply.go | 8 +- .../controller/postgrescluster/apply_test.go | 33 ++-- .../postgrescluster/cluster_test.go | 50 +++-- .../controller/postgrescluster/controller.go | 69 +++---- .../postgrescluster/controller_ref_manager.go | 13 +- .../controller_ref_manager_test.go | 13 +- .../postgrescluster/controller_test.go | 27 +-- internal/controller/postgrescluster/delete.go | 4 +- .../controller/postgrescluster/instance.go | 20 +- .../postgrescluster/instance_rollout_test.go | 13 +- .../postgrescluster/instance_test.go | 44 ++--- .../controller/postgrescluster/patroni.go | 8 +- .../postgrescluster/patroni_test.go | 19 +- .../controller/postgrescluster/pgadmin.go | 14 +- .../postgrescluster/pgadmin_test.go | 30 ++- .../controller/postgrescluster/pgbackrest.go | 54 +++--- .../postgrescluster/pgbackrest_test.go | 107 +++++------ .../controller/postgrescluster/pgbouncer.go | 10 +- .../postgrescluster/pgbouncer_test.go | 28 ++- .../controller/postgrescluster/pgmonitor.go | 6 +- .../postgrescluster/pgmonitor_test.go | 10 +- internal/controller/postgrescluster/pki.go | 4 +- .../controller/postgrescluster/pki_test.go | 4 +- .../pod_disruption_budget_test.go | 5 +- .../controller/postgrescluster/postgres.go | 6 +- .../postgrescluster/postgres_test.go | 17 +- .../controller/postgrescluster/snapshots.go | 12 +- .../postgrescluster/snapshots_test.go | 179 +++++++++--------- .../controller/postgrescluster/volumes.go | 14 +- .../postgrescluster/volumes_test.go | 12 +- 31 files changed, 414 insertions(+), 438 deletions(-) diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index dd321d554..50ac74943 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -34,7 +34,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/tracing" "github.com/crunchydata/postgres-operator/internal/upgradecheck" @@ -256,8 +255,8 @@ func main() { } // add all PostgreSQL Operator controllers to the runtime manager - addControllersToManager(manager, log, registrar) must(pgupgrade.ManagedReconciler(manager, registrar)) + must(postgrescluster.ManagedReconciler(manager, registrar)) must(standalone_pgadmin.ManagedReconciler(manager)) must(crunchybridgecluster.ManagedReconciler(manager, func() bridge.ClientInterface { return bridgeClient() @@ -306,19 +305,3 @@ func main() { log.Info("shutdown complete") } } - -// addControllersToManager adds all PostgreSQL Operator controllers to the provided controller -// runtime manager. 
-func addControllersToManager(mgr runtime.Manager, log logging.Logger, reg registration.Registration) { - pgReconciler := &postgrescluster.Reconciler{ - Client: mgr.GetClient(), - Owner: naming.ControllerPostgresCluster, - Recorder: mgr.GetEventRecorderFor(naming.ControllerPostgresCluster), - Registration: reg, - } - - if err := pgReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create PostgresCluster controller") - os.Exit(1) - } -} diff --git a/internal/controller/postgrescluster/apply.go b/internal/controller/postgrescluster/apply.go index ce3d2fb9e..88659cf39 100644 --- a/internal/controller/postgrescluster/apply.go +++ b/internal/controller/postgrescluster/apply.go @@ -16,8 +16,8 @@ import ( ) // apply sends an apply patch to object's endpoint in the Kubernetes API and -// updates object with any returned content. The fieldManager is set to -// r.Owner and the force parameter is true. +// updates object with any returned content. The fieldManager is set by +// r.Writer and the force parameter is true. // - https://docs.k8s.io/reference/using-api/server-side-apply/#managers // - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts func (r *Reconciler) apply(ctx context.Context, object client.Object) error { @@ -32,7 +32,7 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { // Send the apply-patch with force=true. if err == nil { - err = r.patch(ctx, object, apply, client.ForceOwnership) + err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership) } // Some fields cannot be server-side applied correctly. When their outcome @@ -44,7 +44,7 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { // Send the json-patch when necessary. if err == nil && !patch.IsEmpty() { - err = r.patch(ctx, object, patch) + err = r.Writer.Patch(ctx, object, patch) } return err } diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/postgrescluster/apply_test.go index a1fa6b7f1..e06f29059 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/postgrescluster/apply_test.go @@ -44,7 +44,8 @@ func TestServerSideApply(t *testing.T) { assert.NilError(t, err) t.Run("ObjectMeta", func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} constructor := func() *corev1.ConfigMap { var cm corev1.ConfigMap cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) @@ -55,7 +56,7 @@ func TestServerSideApply(t *testing.T) { // Create the object. before := constructor() - assert.NilError(t, cc.Patch(ctx, before, client.Apply, reconciler.Owner)) + assert.NilError(t, cc.Patch(ctx, before, client.Apply)) assert.Assert(t, before.GetResourceVersion() != "") // Allow the Kubernetes API clock to advance. @@ -63,7 +64,7 @@ func TestServerSideApply(t *testing.T) { // client.Apply changes the ResourceVersion inadvertently. after := constructor() - assert.NilError(t, cc.Patch(ctx, after, client.Apply, reconciler.Owner)) + assert.NilError(t, cc.Patch(ctx, after, client.Apply)) assert.Assert(t, after.GetResourceVersion() != "") switch { @@ -87,7 +88,8 @@ func TestServerSideApply(t *testing.T) { }) t.Run("ControllerReference", func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} // Setup two possible controllers. 
controller1 := new(corev1.ConfigMap) @@ -115,7 +117,7 @@ func TestServerSideApply(t *testing.T) { assert.NilError(t, controllerutil.SetControllerReference(controller2, applied, cc.Scheme())) - err1 := cc.Patch(ctx, applied, client.Apply, client.ForceOwnership, reconciler.Owner) + err1 := cc.Patch(ctx, applied, client.Apply, client.ForceOwnership) // Patch not accepted; the ownerReferences field is invalid. assert.Assert(t, apierrors.IsInvalid(err1), "got %#v", err1) @@ -155,20 +157,21 @@ func TestServerSideApply(t *testing.T) { return &sts } - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} upstream := constructor("status-upstream") // The structs defined in "k8s.io/api/apps/v1" marshal empty status fields. switch { case serverVersion.LessThan(version.MustParseGeneric("1.22")): assert.ErrorContains(t, - cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership, reconciler.Owner), + cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership), "field not declared in schema", "expected https://issue.k8s.io/109210") default: assert.NilError(t, - cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership)) } // Our apply method generates the correct apply-patch. @@ -188,7 +191,8 @@ func TestServerSideApply(t *testing.T) { } t.Run("wrong-keys", func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} intent := constructor("some-selector") intent.Spec.Selector = map[string]string{"k1": "v1"} @@ -196,7 +200,7 @@ func TestServerSideApply(t *testing.T) { // Create the Service. before := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, before, client.Apply, client.ForceOwnership)) // Something external mucks it up. assert.NilError(t, @@ -207,7 +211,7 @@ func TestServerSideApply(t *testing.T) { // client.Apply cannot correct it in old versions of Kubernetes. after := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, after, client.Apply, client.ForceOwnership)) switch { case serverVersion.LessThan(version.MustParseGeneric("1.22")): @@ -249,7 +253,8 @@ func TestServerSideApply(t *testing.T) { {"empty", make(map[string]string)}, } { t.Run(tt.name, func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} intent := constructor(tt.name + "-selector") intent.Spec.Selector = tt.selector @@ -257,7 +262,7 @@ func TestServerSideApply(t *testing.T) { // Create the Service. before := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, before, client.Apply, client.ForceOwnership)) // Something external mucks it up. assert.NilError(t, @@ -268,7 +273,7 @@ func TestServerSideApply(t *testing.T) { // client.Apply cannot correct it. 
after := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, after, client.Apply, client.ForceOwnership)) assert.Assert(t, len(after.Spec.Selector) != len(intent.Spec.Selector), "got %v", after.Spec.Selector) diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index b819291ae..c56947a83 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -82,19 +82,20 @@ func TestCustomLabels(t *testing.T) { require.ParallelCapacity(t, 2) reconciler := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), + Reader: cc, + Recorder: new(record.FakeRecorder), + StatusWriter: client.WithFieldOwner(cc, t.Name()).Status(), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( + cc.Patch(ctx, cluster, client.RawPatch( client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) }) @@ -168,7 +169,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -216,7 +217,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -263,7 +264,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -298,7 +299,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -320,19 +321,20 @@ func TestCustomAnnotations(t *testing.T) { require.ParallelCapacity(t, 2) reconciler := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), + Reader: cc, + Recorder: new(record.FakeRecorder), + StatusWriter: client.WithFieldOwner(cc, t.Name()).Status(), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. 
assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( + cc.Patch(ctx, cluster, client.RawPatch( client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) }) @@ -407,7 +409,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -455,7 +457,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -502,7 +504,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -537,7 +539,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -554,10 +556,7 @@ func TestCustomAnnotations(t *testing.T) { } func TestGenerateClusterPrimaryService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns2" @@ -658,7 +657,7 @@ func TestReconcileClusterPrimaryService(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{Writer: client.WithFieldOwner(cc, t.Name())} cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -676,10 +675,7 @@ func TestReconcileClusterPrimaryService(t *testing.T) { } func TestGenerateClusterReplicaServiceIntent(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns1" diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index 5cd347a7f..09ddf1583 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -45,12 +45,25 @@ const controllerName = naming.ControllerPostgresCluster // Reconciler holds resources for the PostgresCluster reconciler type Reconciler struct { - Client client.Client - Owner client.FieldOwner PodExec func( ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error + + Reader interface { + Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error + List(context.Context, client.ObjectList, ...client.ListOption) error + } + Writer interface { + Delete(context.Context, client.Object, ...client.DeleteOption) error + DeleteAllOf(context.Context, 
client.Object, ...client.DeleteAllOfOption) error + Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error + Update(context.Context, client.Object, ...client.UpdateOption) error + } + StatusWriter interface { + Patch(context.Context, client.Object, client.Patch, ...client.SubResourcePatchOption) error + } + Recorder record.EventRecorder Registration registration.Registration } @@ -69,7 +82,7 @@ func (r *Reconciler) Reconcile( // get the postgrescluster from the cache cluster := &v1beta1.PostgresCluster{} - if err := r.Client.Get(ctx, request.NamespacedName, cluster); err != nil { + if err := r.Reader.Get(ctx, request.NamespacedName, cluster); err != nil { // NotFound cannot be fixed by requeuing so ignore it. During background // deletion, we receive delete events from cluster's dependents after // cluster is deleted. @@ -175,8 +188,7 @@ func (r *Reconciler) Reconcile( if !equality.Semantic.DeepEqual(before.Status, cluster.Status) { // NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track // managed fields on the status subresource: https://issue.k8s.io/88901 - if err := r.Client.Status().Patch( - ctx, cluster, client.MergeFrom(before), r.Owner); err != nil { + if err := r.StatusWriter.Patch(ctx, cluster, client.MergeFrom(before)); err != nil { log.Error(err, "patching cluster status") return err } @@ -400,24 +412,12 @@ func (r *Reconciler) deleteControlled( version := object.GetResourceVersion() exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} - return r.Client.Delete(ctx, object, exactly) + return r.Writer.Delete(ctx, object, exactly) } return nil } -// patch sends patch to object's endpoint in the Kubernetes API and updates -// object with any returned content. The fieldManager is set to r.Owner, but -// can be overridden in options. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -func (r *Reconciler) patch( - ctx context.Context, object client.Object, - patch client.Patch, options ...client.PatchOption, -) error { - options = append([]client.PatchOption{r.Owner}, options...) - return r.Client.Patch(ctx, object, patch, options...) -} - // The owner reference created by controllerutil.SetControllerReference blocks // deletion. 
The OwnerReferencesPermissionEnforcement plugin requires that the // creator of such a reference have either "delete" permission on the owner or @@ -431,7 +431,7 @@ func (r *Reconciler) patch( func (r *Reconciler) setControllerReference( owner *v1beta1.PostgresCluster, controlled client.Object, ) error { - return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetControllerReference(owner, controlled, runtime.Scheme) } // setOwnerReference sets an OwnerReference on the object without setting the @@ -439,7 +439,7 @@ func (r *Reconciler) setControllerReference( func (r *Reconciler) setOwnerReference( owner *v1beta1.PostgresCluster, controlled client.Object, ) error { - return controllerutil.SetOwnerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetOwnerReference(owner, controlled, runtime.Scheme) } // +kubebuilder:rbac:groups="",resources="configmaps",verbs={get,list,watch} @@ -456,17 +456,22 @@ func (r *Reconciler) setOwnerReference( // +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={get,list,watch} // +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={get,list,watch} -// SetupWithManager adds the PostgresCluster controller to the provided runtime manager -func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { - if r.PodExec == nil { - var err error - r.PodExec, err = runtime.NewPodExecutor(mgr.GetConfig()) - if err != nil { - return err - } +// ManagedReconciler creates a [Reconciler] and adds it to m. +func ManagedReconciler(m manager.Manager, r registration.Registration) error { + exec, err := runtime.NewPodExecutor(m.GetConfig()) + kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerPostgresCluster) + recorder := m.GetEventRecorderFor(naming.ControllerPostgresCluster) + + reconciler := &Reconciler{ + PodExec: exec, + Reader: kubernetes, + Recorder: recorder, + Registration: r, + StatusWriter: kubernetes.Status(), + Writer: kubernetes, } - return builder.ControllerManagedBy(mgr). + return errors.Join(err, builder.ControllerManagedBy(m). For(&v1beta1.PostgresCluster{}). Owns(&corev1.ConfigMap{}). Owns(&corev1.Endpoints{}). @@ -481,8 +486,8 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { Owns(&rbacv1.RoleBinding{}). Owns(&batchv1.CronJob{}). Owns(&policyv1.PodDisruptionBudget{}). - Watches(&corev1.Pod{}, r.watchPods()). + Watches(&corev1.Pod{}, reconciler.watchPods()). Watches(&appsv1.StatefulSet{}, - r.controllerRefHandlerFuncs()). // watch all StatefulSets - Complete(r) + reconciler.controllerRefHandlerFuncs()). 
// watch all StatefulSets + Complete(reconciler)) } diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go index 6caa58b85..fc814259b 100644 --- a/internal/controller/postgrescluster/controller_ref_manager.go +++ b/internal/controller/postgrescluster/controller_ref_manager.go @@ -12,7 +12,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -28,8 +27,7 @@ import ( func (r *Reconciler) adoptObject(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, obj client.Object) error { - if err := controllerutil.SetControllerReference(postgresCluster, obj, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(postgresCluster, obj); err != nil { return err } @@ -39,10 +37,7 @@ func (r *Reconciler) adoptObject(ctx context.Context, postgresCluster *v1beta1.P return err } - return r.Client.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, - patchBytes), &client.PatchOptions{ - FieldManager: controllerName, - }) + return r.Writer.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, patchBytes)) } // claimObject is responsible for adopting or releasing Objects based on their current @@ -129,7 +124,7 @@ func (r *Reconciler) getPostgresClusterForObject(ctx context.Context, } postgresCluster := &v1beta1.PostgresCluster{} - if err := r.Client.Get(ctx, types.NamespacedName{ + if err := r.Reader.Get(ctx, types.NamespacedName{ Name: clusterName, Namespace: obj.GetNamespace(), }, postgresCluster); err != nil { @@ -175,7 +170,7 @@ func (r *Reconciler) releaseObject(ctx context.Context, return err } - return r.Client.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, patch)) + return r.Writer.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, patch)) } // controllerRefHandlerFuncs returns the handler funcs that should be utilized to watch diff --git a/internal/controller/postgrescluster/controller_ref_manager_test.go b/internal/controller/postgrescluster/controller_ref_manager_test.go index 7a60e4138..2d8432856 100644 --- a/internal/controller/postgrescluster/controller_ref_manager_test.go +++ b/internal/controller/postgrescluster/controller_ref_manager_test.go @@ -22,7 +22,10 @@ func TestManageControllerRefs(t *testing.T) { require.ParallelCapacity(t, 1) ctx := context.Background() - r := &Reconciler{Client: tClient} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } clusterName := "hippo" cluster := testCluster() @@ -59,7 +62,7 @@ func TestManageControllerRefs(t *testing.T) { obj.Name = "adopt" obj.Labels = map[string]string{naming.LabelCluster: clusterName} - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } @@ -100,7 +103,7 @@ func TestManageControllerRefs(t *testing.T) { BlockOwnerDeletion: &isTrue, }) - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } @@ -123,7 +126,7 @@ func TestManageControllerRefs(t *testing.T) { obj.Name = "ignore-no-labels-refs" obj.Labels = map[string]string{"ignore-label": "ignore-value"} - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil 
{ t.Error(err) } @@ -146,7 +149,7 @@ func TestManageControllerRefs(t *testing.T) { obj.Name = "ignore-no-postgrescluster" obj.Labels = map[string]string{naming.LabelCluster: "nonexistent"} - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index 36759cd78..a6f237b81 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -39,7 +39,7 @@ func TestDeleteControlled(t *testing.T) { require.ParallelCapacity(t, 1) ns := setupNamespace(t, cc) - reconciler := Reconciler{Client: cc} + reconciler := Reconciler{Writer: cc} cluster := testCluster() cluster.Namespace = ns.Name @@ -118,6 +118,7 @@ spec: var _ = Describe("PostgresCluster Reconciler", func() { var test struct { Namespace *corev1.Namespace + Owner string Reconciler Reconciler Recorder *record.FakeRecorder } @@ -129,13 +130,17 @@ var _ = Describe("PostgresCluster Reconciler", func() { test.Namespace.Name = "postgres-operator-test-" + rand.String(6) Expect(suite.Client.Create(ctx, test.Namespace)).To(Succeed()) + test.Owner = "asdf" test.Recorder = record.NewFakeRecorder(100) test.Recorder.IncludeObject = true - test.Reconciler.Client = suite.Client - test.Reconciler.Owner = "asdf" + client := client.WithFieldOwner(suite.Client, test.Owner) + + test.Reconciler.Reader = client test.Reconciler.Recorder = test.Recorder test.Reconciler.Registration = nil + test.Reconciler.StatusWriter = client.Status() + test.Reconciler.Writer = client }) AfterEach(func() { @@ -284,7 +289,7 @@ spec: )) Expect(ccm.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -308,7 +313,7 @@ spec: )) Expect(cps.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -347,7 +352,7 @@ spec: // - https://pr.k8s.io/100970 Expect(existing.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "FieldsV1": PointTo(MatchAllFields(Fields{ "Raw": WithTransform(func(in []byte) (out map[string]any) { Expect(yaml.Unmarshal(in, &out)).To(Succeed()) @@ -365,7 +370,7 @@ spec: default: Expect(existing.ManagedFields).To(ContainElements( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "FieldsV1": PointTo(MatchAllFields(Fields{ "Raw": WithTransform(func(in []byte) (out map[string]any) { Expect(yaml.Unmarshal(in, &out)).To(Succeed()) @@ -378,7 +383,7 @@ spec: })), }), MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "FieldsV1": PointTo(MatchAllFields(Fields{ "Raw": WithTransform(func(in []byte) (out map[string]any) { Expect(yaml.Unmarshal(in, &out)).To(Succeed()) @@ -409,7 +414,7 @@ spec: )) Expect(ds.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -501,7 +506,7 @@ spec: )) Expect(icm.ManagedFields).To(ContainElement( 
MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -522,7 +527,7 @@ spec: )) Expect(instance.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) diff --git a/internal/controller/postgrescluster/delete.go b/internal/controller/postgrescluster/delete.go index a1a4d322d..74a786dd3 100644 --- a/internal/controller/postgrescluster/delete.go +++ b/internal/controller/postgrescluster/delete.go @@ -58,7 +58,7 @@ func (r *Reconciler) handleDelete( // Make another copy so that Patch doesn't write back to cluster. intent := before.DeepCopy() intent.Finalizers = append(intent.Finalizers, naming.Finalizer) - err := errors.WithStack(r.patch(ctx, intent, + err := errors.WithStack(r.Writer.Patch(ctx, intent, client.MergeFromWithOptions(before, client.MergeFromWithOptimisticLock{}))) // The caller can do what they like or requeue upon error. @@ -96,7 +96,7 @@ func (r *Reconciler) handleDelete( // Make another copy so that Patch doesn't write back to cluster. intent := before.DeepCopy() intent.Finalizers = finalizers.Delete(naming.Finalizer).List() - err := errors.WithStack(r.patch(ctx, intent, + err := errors.WithStack(r.Writer.Patch(ctx, intent, client.MergeFromWithOptions(before, client.MergeFromWithOptimisticLock{}))) // The caller should wait for further events or requeue upon error. diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index b000074e5..3b0b9b58f 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -300,14 +300,14 @@ func (r *Reconciler) observeInstances( selector, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, pods, + r.Reader.List(ctx, pods, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) } if err == nil { err = errors.WithStack( - r.Client.List(ctx, runners, + r.Reader.List(ctx, runners, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -418,7 +418,7 @@ func (r *Reconciler) deleteInstances( instances, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, pods, + r.Reader.List(ctx, pods, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: instances}, )) @@ -456,7 +456,7 @@ func (r *Reconciler) deleteInstances( // apps/v1.Deployment, apps/v1.ReplicaSet, and apps/v1.StatefulSet all // have a "spec.replicas" field with the same meaning. patch := client.RawPatch(client.Merge.Type(), []byte(`{"spec":{"replicas":0}}`)) - err := errors.WithStack(r.patch(ctx, instance, patch)) + err := errors.WithStack(r.Writer.Patch(ctx, instance, patch)) // When the pod controller is missing, requeue rather than return an // error. 
The garbage collector will stop the pod, and it is not our @@ -532,7 +532,7 @@ func (r *Reconciler) deleteInstance( uList.SetGroupVersionKind(gvk) err = errors.WithStack( - r.Client.List(ctx, uList, + r.Reader.List(ctx, uList, client.InNamespace(cluster.GetNamespace()), client.MatchingLabelsSelector{Selector: selector}, )) @@ -650,7 +650,7 @@ func (r *Reconciler) cleanupPodDisruptionBudgets( pdbList := &policyv1.PodDisruptionBudgetList{} if err == nil { - err = r.Client.List(ctx, pdbList, + err = r.Reader.List(ctx, pdbList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{ Selector: selector, }) @@ -847,7 +847,7 @@ func (r *Reconciler) rolloutInstance( // NOTE(cbandy): This could return an apierrors.IsConflict() which should be // retried by another reconcile (not ignored). return errors.WithStack( - r.Client.Delete(ctx, pod, client.Preconditions{ + r.Writer.Delete(ctx, pod, client.Preconditions{ UID: &pod.UID, ResourceVersion: &pod.ResourceVersion, })) @@ -1188,7 +1188,7 @@ func (r *Reconciler) reconcileInstance( // Create new err variable to avoid abandoning the rest of the reconcile loop if there // is an error getting the monitoring user secret err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(monitoringUserSecret), monitoringUserSecret)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(monitoringUserSecret), monitoringUserSecret)) if err == nil { pgPassword = string(monitoringUserSecret.Data["password"]) } @@ -1459,7 +1459,7 @@ func (r *Reconciler) reconcileInstanceCertificates( ) (*corev1.Secret, error) { existing := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) instanceCerts := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} instanceCerts.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) @@ -1547,7 +1547,7 @@ func (r *Reconciler) reconcileInstanceSetPodDisruptionBudget( scaled, err = intstr.GetScaledValueFromIntOrPercent(minAvailable, int(*spec.Replicas), true) } if err == nil && scaled <= 0 { - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pdb), pdb)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(pdb), pdb)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, pdb)) } diff --git a/internal/controller/postgrescluster/instance_rollout_test.go b/internal/controller/postgrescluster/instance_rollout_test.go index 7bd63ce9d..2b8f0db5f 100644 --- a/internal/controller/postgrescluster/instance_rollout_test.go +++ b/internal/controller/postgrescluster/instance_rollout_test.go @@ -57,9 +57,12 @@ func TestReconcilerRolloutInstance(t *testing.T) { } observed := &observedInstances{forCluster: instances} - key := client.ObjectKey{Namespace: "ns1", Name: "one-pod-bruh"} - reconciler := &Reconciler{} - reconciler.Client = fake.NewClientBuilder().WithObjects(instances[0].Pods[0]).Build() + cc := fake.NewClientBuilder().WithObjects(instances[0].Pods[0]).Build() + key := client.ObjectKeyFromObject(instances[0].Pods[0]) + reconciler := &Reconciler{ + Reader: cc, + Writer: cc, + } execCalls := 0 reconciler.PodExec = func( @@ -82,13 +85,13 @@ func TestReconcilerRolloutInstance(t *testing.T) { return nil } - assert.NilError(t, reconciler.Client.Get(ctx, key, &corev1.Pod{}), + assert.NilError(t, cc.Get(ctx, key, &corev1.Pod{}), "bug in test: expected pod to 
exist") assert.NilError(t, reconciler.rolloutInstance(ctx, cluster, observed, instances[0])) assert.Equal(t, execCalls, 1, "expected PodExec to be called") - err := reconciler.Client.Get(ctx, key, &corev1.Pod{}) + err := cc.Get(ctx, key, &corev1.Pod{}) assert.Assert(t, apierrors.IsNotFound(err), "expected pod to be deleted, got: %#v", err) }) diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index 1d17e4f9f..8a028913e 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -1213,20 +1213,21 @@ func TestDeleteInstance(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), + Reader: cc, + Recorder: new(record.FakeRecorder), + StatusWriter: client.WithFieldOwner(cc, t.Name()).Status(), + Writer: client.WithFieldOwner(cc, t.Name()), } // Define, Create, and Reconcile a cluster to get an instance running in kube cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( + cc.Patch(ctx, cluster, client.RawPatch( client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) }) @@ -1239,7 +1240,7 @@ func TestDeleteInstance(t *testing.T) { assert.Assert(t, result.Requeue == false) stsList := &appsv1.StatefulSetList{} - assert.NilError(t, reconciler.Client.List(ctx, stsList, + assert.NilError(t, cc.List(ctx, stsList, client.InNamespace(cluster.Namespace), client.MatchingLabels{ naming.LabelCluster: cluster.Name, @@ -1272,7 +1273,7 @@ func TestDeleteInstance(t *testing.T) { err := wait.PollUntilContextTimeout(ctx, time.Second*3, Scale(time.Second*30), false, func(ctx context.Context) (bool, error) { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -1816,8 +1817,8 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } foundPDB := func( @@ -1825,7 +1826,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { spec *v1beta1.PostgresInstanceSetSpec, ) bool { got := &policyv1.PodDisruptionBudget{} - err := r.Client.Get(ctx, + err := cc.Get(ctx, naming.AsObjectKey(naming.InstanceSet(cluster, spec)), got) return !apierrors.IsNotFound(err) @@ -1857,8 +1858,8 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { spec := &cluster.Spec.InstanceSets[0] spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, foundPDB(cluster, spec)) @@ -1884,8 +1885,8 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { spec := 
&cluster.Spec.InstanceSets[0] spec.MinAvailable = initialize.Pointer(intstr.FromString("50%")) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, foundPDB(cluster, spec)) @@ -1934,8 +1935,8 @@ func TestCleanupDisruptionBudgets(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -1964,14 +1965,14 @@ func TestCleanupDisruptionBudgets(t *testing.T) { createPDB := func( pdb *policyv1.PodDisruptionBudget, ) error { - return r.Client.Create(ctx, pdb) + return cc.Create(ctx, pdb) } foundPDB := func( pdb *policyv1.PodDisruptionBudget, ) bool { return !apierrors.IsNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(pdb), + cc.Get(ctx, client.ObjectKeyFromObject(pdb), &policyv1.PodDisruptionBudget{})) } @@ -1986,8 +1987,8 @@ func TestCleanupDisruptionBudgets(t *testing.T) { spec := &cluster.Spec.InstanceSets[0] spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) expectedPDB := generatePDB(t, cluster, spec, initialize.Pointer(intstr.FromInt32(1))) @@ -2031,8 +2032,7 @@ func TestReconcileInstanceConfigMap(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } t.Run("LocalVolumeOtelDisabled", func(t *testing.T) { diff --git a/internal/controller/postgrescluster/patroni.go b/internal/controller/postgrescluster/patroni.go index af3a3b8cc..7368fe295 100644 --- a/internal/controller/postgrescluster/patroni.go +++ b/internal/controller/postgrescluster/patroni.go @@ -37,7 +37,7 @@ func (r *Reconciler) deletePatroniArtifacts( selector, err := naming.AsSelector(naming.ClusterPatronis(cluster)) if err == nil { err = errors.WithStack( - r.Client.DeleteAllOf(ctx, &corev1.Endpoints{}, + r.Writer.DeleteAllOf(ctx, &corev1.Endpoints{}, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -324,7 +324,7 @@ func (r *Reconciler) reconcilePatroniStatus( dcs := &corev1.Endpoints{ObjectMeta: naming.PatroniDistributedConfiguration(cluster)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(dcs), dcs))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(dcs), dcs))) if err == nil { if dcs.Annotations["initialize"] != "" { @@ -362,14 +362,14 @@ func (r *Reconciler) reconcileReplicationSecret( Name: cluster.Spec.CustomReplicationClientTLSSecret.Name, Namespace: cluster.Namespace, }} - err := errors.WithStack(r.Client.Get(ctx, + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(custom), custom)) return custom, err } existing := &corev1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(cluster)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) leaf := &pki.LeafCertificate{} commonName := 
postgres.ReplicationUser diff --git a/internal/controller/postgrescluster/patroni_test.go b/internal/controller/postgrescluster/patroni_test.go index b7fe88530..6968bd325 100644 --- a/internal/controller/postgrescluster/patroni_test.go +++ b/internal/controller/postgrescluster/patroni_test.go @@ -32,11 +32,7 @@ import ( ) func TestGeneratePatroniLeaderLeaseService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - reconciler := &Reconciler{ - Client: cc, Recorder: new(record.FakeRecorder), } @@ -232,7 +228,7 @@ func TestReconcilePatroniLeaderLease(t *testing.T) { require.ParallelCapacity(t, 1) ns := setupNamespace(t, cc) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{Writer: client.WithFieldOwner(cc, t.Name())} cluster := testCluster() cluster.Namespace = ns.Name @@ -322,7 +318,10 @@ func TestPatroniReplicationSecret(t *testing.T) { require.ParallelCapacity(t, 0) ctx := context.Background() - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } // test postgrescluster values var ( @@ -351,7 +350,7 @@ func TestPatroniReplicationSecret(t *testing.T) { patroniReplicationSecret := &corev1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(postgresCluster)} patroniReplicationSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) - err = r.Client.Get(ctx, client.ObjectKeyFromObject(patroniReplicationSecret), patroniReplicationSecret) + err = tClient.Get(ctx, client.ObjectKeyFromObject(patroniReplicationSecret), patroniReplicationSecret) assert.NilError(t, err) t.Run("ca.crt", func(t *testing.T) { @@ -426,7 +425,7 @@ func TestReconcilePatroniStatus(t *testing.T) { require.ParallelCapacity(t, 0) ns := setupNamespace(t, tClient) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Reader: tClient} systemIdentifier := "6952526174828511264" createResources := func(index, readyReplicas int, @@ -526,13 +525,9 @@ func TestReconcilePatroniStatus(t *testing.T) { } func TestReconcilePatroniSwitchover(t *testing.T) { - _, client := setupKubernetes(t) - require.ParallelCapacity(t, 0) - var called, failover, callError, callFails bool var timelineCallNoLeader, timelineCall bool r := Reconciler{ - Client: client, PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { called = true diff --git a/internal/controller/postgrescluster/pgadmin.go b/internal/controller/postgrescluster/pgadmin.go index dbaaf359e..fe5d4ce21 100644 --- a/internal/controller/postgrescluster/pgadmin.go +++ b/internal/controller/postgrescluster/pgadmin.go @@ -96,7 +96,7 @@ func (r *Reconciler) reconcilePGAdminConfigMap( // pgAdmin is disabled; delete the ConfigMap if it exists. Check the // client cache first using Get. key := client.ObjectKeyFromObject(configmap) - err := errors.WithStack(r.Client.Get(ctx, key, configmap)) + err := errors.WithStack(r.Reader.Get(ctx, key, configmap)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, configmap)) } @@ -212,7 +212,7 @@ func (r *Reconciler) reconcilePGAdminService( // pgAdmin is disabled; delete the Service if it exists. Check the client // cache first using Get. 
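
Every one of these "disabled; delete it if it exists" paths reads through the cached Reader before the Writer issues a DELETE, so reconciles of clusters without the feature stay read-only. A minimal standalone sketch of the idiom using controller-runtime's client.Reader and client.Writer; the function name and Service argument are invented, and the operator's real code additionally guards the delete with UID preconditions via deleteControlled:

    package sketch

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // deleteIfExists checks the informer cache before issuing a DELETE.
    // A NotFound from either call means there is nothing left to do.
    func deleteIfExists(ctx context.Context, reader client.Reader,
        writer client.Writer, service *corev1.Service) error {
        err := reader.Get(ctx, client.ObjectKeyFromObject(service), service)
        if err == nil {
            err = writer.Delete(ctx, service)
        }
        return client.IgnoreNotFound(err)
    }
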
key := client.ObjectKeyFromObject(service) - err := errors.WithStack(r.Client.Get(ctx, key, service)) + err := errors.WithStack(r.Reader.Get(ctx, key, service)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, service)) } @@ -240,7 +240,7 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( // pgAdmin is disabled; delete the Deployment if it exists. Check the // client cache first using Get. key := client.ObjectKeyFromObject(sts) - err := errors.WithStack(r.Client.Get(ctx, key, sts)) + err := errors.WithStack(r.Reader.Get(ctx, key, sts)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, sts)) } @@ -333,7 +333,7 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by // the StatefulSet that gets created in the next reconcile. existing := &appsv1.StatefulSet{} - if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { + if err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { if !apierrors.IsNotFound(err) { return err } @@ -346,7 +346,7 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) - return errors.WithStack(client.IgnoreNotFound(r.Client.Delete(ctx, existing, exactly, propagate))) + return errors.WithStack(client.IgnoreNotFound(r.Writer.Delete(ctx, existing, exactly, propagate))) } } @@ -391,7 +391,7 @@ func (r *Reconciler) reconcilePGAdminDataVolume( // pgAdmin is disabled; delete the PVC if it exists. Check the client // cache first using Get. key := client.ObjectKeyFromObject(pvc) - err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + err := errors.WithStack(r.Reader.Get(ctx, key, pvc)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc)) } @@ -439,7 +439,7 @@ func (r *Reconciler) reconcilePGAdminUsers( pod := &corev1.Pod{ObjectMeta: naming.ClusterPGAdmin(cluster)} pod.Name += "-0" - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pod), pod)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(pod), pod)) if err != nil { return client.IgnoreNotFound(err) } diff --git a/internal/controller/postgrescluster/pgadmin_test.go b/internal/controller/postgrescluster/pgadmin_test.go index fc585d895..bb81d90cf 100644 --- a/internal/controller/postgrescluster/pgadmin_test.go +++ b/internal/controller/postgrescluster/pgadmin_test.go @@ -29,10 +29,7 @@ import ( ) func TestGeneratePGAdminConfigMap(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "some-ns" @@ -118,11 +115,7 @@ ownerReferences: } func TestGeneratePGAdminService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - reconciler := &Reconciler{ - Client: cc, Recorder: new(record.FakeRecorder), } @@ -354,7 +347,10 @@ func TestReconcilePGAdminService(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -456,7 +452,10 @@ func TestReconcilePGAdminStatefulSet(t 
*testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } ns := setupNamespace(t, cc) cluster := pgAdminTestCluster(*ns) @@ -670,8 +669,7 @@ func TestReconcilePGAdminDataVolume(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &Reconciler{ - Client: tClient, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(tClient, t.Name()), } ns := setupNamespace(t, tClient) @@ -721,7 +719,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { t.Run("NoPods", func(t *testing.T) { r := new(Reconciler) - r.Client = fake.NewClientBuilder().Build() + r.Reader = fake.NewClientBuilder().Build() assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) }) @@ -737,7 +735,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { pod.Status.ContainerStatuses = nil r := new(Reconciler) - r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) }) @@ -757,7 +755,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { new(corev1.ContainerStateRunning) r := new(Reconciler) - r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) }) @@ -773,7 +771,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { new(corev1.ContainerStateRunning) r := new(Reconciler) - r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() calls := 0 r.PodExec = func( diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index b4067a83f..e4e1a06d3 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -139,7 +139,7 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by // the StatefulSet that gets created in the next reconcile. 
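
Both StatefulSet hand-offs in this patch (the pgAdmin one above and the repo host below) pair two delete options: client.Preconditions pins the DELETE to the UID and resourceVersion observed in the cache, and the Orphan propagation policy leaves the Pods behind for the replacement StatefulSet to claim. A condensed sketch of just that call, with an invented function name; whether NotFound is ignored varies by call site:

    package sketch

    import (
        "context"

        appsv1 "k8s.io/api/apps/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // orphanDelete removes a StatefulSet without cascading to its Pods, and
    // only if the API server still holds the exact revision we observed.
    func orphanDelete(ctx context.Context, writer client.Writer, sts *appsv1.StatefulSet) error {
        uid := sts.GetUID()
        version := sts.GetResourceVersion()
        return client.IgnoreNotFound(writer.Delete(ctx, sts,
            client.Preconditions{UID: &uid, ResourceVersion: &version},
            client.PropagationPolicy(metav1.DeletePropagationOrphan)))
    }

If another actor replaced the StatefulSet between the cache read and the DELETE, the preconditions fail with a Conflict and the next reconcile re-evaluates from scratch.
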
existing := &appsv1.StatefulSet{} - if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(repo), existing)); err != nil { + if err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(repo), existing)); err != nil { if !apierrors.IsNotFound(err) { return nil, err } @@ -152,7 +152,7 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) - return repo, errors.WithStack(r.Client.Delete(ctx, existing, exactly, propagate)) + return repo, errors.WithStack(r.Writer.Delete(ctx, existing, exactly, propagate)) } } @@ -250,7 +250,7 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - if err := r.Client.List(ctx, uList, + if err := r.Reader.List(ctx, uList, client.InNamespace(postgresCluster.GetNamespace()), client.MatchingLabelsSelector{Selector: selector}); err != nil { return nil, errors.WithStack(err) @@ -400,7 +400,7 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, // If nothing has specified that the resource should not be deleted, then delete if delete { - if err := r.Client.Delete(ctx, &ownedResources[i], + if err := r.Writer.Delete(ctx, &ownedResources[i], client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { return []unstructured.Unstructured{}, errors.WithStack(err) } @@ -947,7 +947,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, // lookup the various patroni endpoints leaderEP, dcsEP, failoverEP := corev1.Endpoints{}, corev1.Endpoints{}, corev1.Endpoints{} currentEndpoints := []corev1.Endpoints{} - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), + if err := r.Reader.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), &leaderEP); err != nil { if !apierrors.IsNotFound(err) { return nil, nil, errors.WithStack(err) @@ -955,7 +955,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, } else { currentEndpoints = append(currentEndpoints, leaderEP) } - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)), + if err := r.Reader.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)), &dcsEP); err != nil { if !apierrors.IsNotFound(err) { return nil, nil, errors.WithStack(err) @@ -963,7 +963,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, } else { currentEndpoints = append(currentEndpoints, dcsEP) } - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)), + if err := r.Reader.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)), &failoverEP); err != nil { if !apierrors.IsNotFound(err) { return nil, nil, errors.WithStack(err) @@ -973,7 +973,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, } restoreJobs := &batchv1.JobList{} - if err := r.Client.List(ctx, restoreJobs, &client.ListOptions{ + if err := r.Reader.List(ctx, restoreJobs, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: naming.PGBackRestRestoreJobSelector(cluster.GetName()), }); err != nil { @@ -1021,26 +1021,26 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, // by the restore job. Clean them up if they still exist. 
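
The cleanup that follows is the controller's usual list-then-delete shape; note that it deletes &items[i] by index, addressing the stored slice element rather than a per-iteration copy. A standalone sketch under invented names, using functional list options in place of the *client.ListOptions struct seen below:

    package sketch

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/labels"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // deleteMatching lists ConfigMaps from the cache by label selector and
    // deletes each match through the field-owning writer.
    func deleteMatching(ctx context.Context, reader client.Reader,
        writer client.Writer, namespace string, selector labels.Selector) error {
        list := &corev1.ConfigMapList{}
        if err := reader.List(ctx, list, client.InNamespace(namespace),
            client.MatchingLabelsSelector{Selector: selector}); err != nil {
            return err
        }
        for i := range list.Items {
            if err := writer.Delete(ctx, &list.Items[i]); client.IgnoreNotFound(err) != nil {
                return err
            }
        }
        return nil
    }
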
 	selector := naming.PGBackRestRestoreConfigSelector(cluster.GetName())
 	restoreConfigMaps := &corev1.ConfigMapList{}
-	if err := r.Client.List(ctx, restoreConfigMaps, &client.ListOptions{
+	if err := r.Reader.List(ctx, restoreConfigMaps, &client.ListOptions{
 		Namespace:     cluster.Namespace,
 		LabelSelector: selector,
 	}); err != nil {
 		return nil, nil, errors.WithStack(err)
 	}
 	for i := range restoreConfigMaps.Items {
-		if err := r.Client.Delete(ctx, &restoreConfigMaps.Items[i]); err != nil {
+		if err := r.Writer.Delete(ctx, &restoreConfigMaps.Items[i]); err != nil {
 			return nil, nil, errors.WithStack(err)
 		}
 	}
 	restoreSecrets := &corev1.SecretList{}
-	if err := r.Client.List(ctx, restoreSecrets, &client.ListOptions{
+	if err := r.Reader.List(ctx, restoreSecrets, &client.ListOptions{
 		Namespace:     cluster.Namespace,
 		LabelSelector: selector,
 	}); err != nil {
 		return nil, nil, errors.WithStack(err)
 	}
 	for i := range restoreSecrets.Items {
-		if err := r.Client.Delete(ctx, &restoreSecrets.Items[i]); err != nil {
+		if err := r.Writer.Delete(ctx, &restoreSecrets.Items[i]); err != nil {
 			return nil, nil, errors.WithStack(err)
 		}
 	}
@@ -1132,7 +1132,7 @@ func (r *Reconciler) prepareForRestore(ctx context.Context,
 	// remove any existing restore Jobs
 	if restoreJob != nil {
 		setPreparingClusterCondition("removing restore job")
-		if err := r.Client.Delete(ctx, restoreJob,
+		if err := r.Writer.Delete(ctx, restoreJob,
 			client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil {
 			return errors.WithStack(err)
 		}
@@ -1142,7 +1142,7 @@
 	if clusterRunning {
 		setPreparingClusterCondition("removing runners")
 		for _, runner := range runners {
-			err := r.Client.Delete(ctx, runner,
+			err := r.Writer.Delete(ctx, runner,
 				client.PropagationPolicy(metav1.DeletePropagationForeground))
 			if client.IgnoreNotFound(err) != nil {
 				return errors.WithStack(err)
@@ -1173,7 +1173,7 @@
 	setPreparingClusterCondition("removing DCS")
 	// delete any Endpoints
 	for i := range currentEndpoints {
-		if err := r.Client.Delete(ctx, &currentEndpoints[i]); client.IgnoreNotFound(err) != nil {
+		if err := r.Writer.Delete(ctx, &currentEndpoints[i]); client.IgnoreNotFound(err) != nil {
 			return errors.WithStack(err)
 		}
 	}
@@ -1699,7 +1699,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context,
 				"PostgreSQL data for the cluster: %w", err)
 		}
 	} else {
-		if err := r.Client.Get(ctx,
+		if err := r.Reader.Get(ctx,
 			client.ObjectKey{Name: sourceClusterName, Namespace: sourceClusterNamespace},
 			sourceCluster); err != nil {
 			if apierrors.IsNotFound(err) {
@@ -1901,7 +1901,7 @@ func (r *Reconciler) copyRestoreConfiguration(ctx context.Context,
 	sourceConfig := &corev1.ConfigMap{ObjectMeta: naming.PGBackRestConfig(sourceCluster)}
 	if err == nil {
 		err = errors.WithStack(
-			r.Client.Get(ctx, client.ObjectKeyFromObject(sourceConfig), sourceConfig))
+			r.Reader.Get(ctx, client.ObjectKeyFromObject(sourceConfig), sourceConfig))
 	}
 
 	// Retrieve the pgBackRest Secret of the source cluster if it has one.
When @@ -1909,7 +1909,7 @@ func (r *Reconciler) copyRestoreConfiguration(ctx context.Context, sourceSecret := &corev1.Secret{ObjectMeta: naming.PGBackRestSecret(sourceCluster)} if err == nil { err = errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(sourceSecret), sourceSecret)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(sourceSecret), sourceSecret)) if apierrors.IsNotFound(err) { sourceSecret, err = nil, nil @@ -1997,7 +1997,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, // Get the existing Secret for the copy, if it exists. It **must** // exist if not configured as optional. if secretProjection.Optional != nil && *secretProjection.Optional { - if err := errors.WithStack(r.Client.Get(ctx, secretName, + if err := errors.WithStack(r.Reader.Get(ctx, secretName, secretCopy)); apierrors.IsNotFound(err) { continue } else { @@ -2005,7 +2005,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, } } else { if err := errors.WithStack( - r.Client.Get(ctx, secretName, secretCopy)); err != nil { + r.Reader.Get(ctx, secretName, secretCopy)); err != nil { return err } } @@ -2051,7 +2051,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, // Get the existing ConfigMap for the copy, if it exists. It **must** // exist if not configured as optional. if configMapProjection.Optional != nil && *configMapProjection.Optional { - if err := errors.WithStack(r.Client.Get(ctx, configMapName, + if err := errors.WithStack(r.Reader.Get(ctx, configMapName, configMapCopy)); apierrors.IsNotFound(err) { continue } else { @@ -2059,7 +2059,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, } } else { if err := errors.WithStack( - r.Client.Get(ctx, configMapName, configMapCopy)); err != nil { + r.Reader.Get(ctx, configMapName, configMapCopy)); err != nil { return err } } @@ -2144,7 +2144,7 @@ func (r *Reconciler) reconcilePGBackRestSecret(ctx context.Context, existing := &corev1.Secret{} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(intent), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(intent), existing))) if err == nil { err = r.setControllerReference(cluster, intent) @@ -2420,7 +2420,7 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, // per a new value for the annotation (unless the user manually deletes the Job). 
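
The delete just below exists because a finished Job cannot be rerun: most of its spec is immutable, so honoring a new annotation value means removing the completed or failed Job and creating a fresh one on a later reconcile. Background propagation returns as soon as the Job is marked for deletion and leaves its Pods to the garbage collector. A minimal sketch with an invented helper name; the real call also wraps the error with errors.WithStack:

    package sketch

    import (
        "context"

        batchv1 "k8s.io/api/batch/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // replaceFinishedJob deletes a completed or failed Job so the controller
    // can create a new one for the next requested backup.
    func replaceFinishedJob(ctx context.Context, writer client.Writer, job *batchv1.Job) error {
        return writer.Delete(ctx, job,
            client.PropagationPolicy(metav1.DeletePropagationBackground))
    }
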
if completed || failed { if manualAnnotation != "" && backupID != manualAnnotation { - return errors.WithStack(r.Client.Delete(ctx, currentBackupJob, + return errors.WithStack(r.Writer.Delete(ctx, currentBackupJob, client.PropagationPolicy(metav1.DeletePropagationBackground))) } } @@ -2693,7 +2693,7 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, if failed || replicaCreateRepoChanged || (job.GetAnnotations()[naming.PGBackRestCurrentConfig] != containerName) || (job.GetAnnotations()[naming.PGBackRestConfigHash] != configHash) { - if err := r.Client.Delete(ctx, job, + if err := r.Writer.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { return errors.WithStack(err) } @@ -2819,7 +2819,7 @@ func (r *Reconciler) writeRepoVolumeSizeRequestStatus(ctx context.Context, pods := &corev1.PodList{} if err := errors.WithStack( - r.Client.List(ctx, pods, + r.Reader.List(ctx, pods, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{ Selector: naming.PGBackRestDedicatedLabels(cluster.Name).AsSelector()}, @@ -3339,7 +3339,7 @@ func (r *Reconciler) ObserveBackupUniverse(ctx context.Context, }, } err = errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) repoHostStatefulSetNotFound = apierrors.IsNotFound(err) // If we have an error that is not related to a missing repo-host StatefulSet, diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index 715fcb2d4..b0f4d0eb8 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -181,9 +181,9 @@ func TestReconcilePGBackRest(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), + Reader: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(mgr.GetClient(), controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -681,7 +681,7 @@ func TestReconcilePGBackRestRBAC(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 0) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Writer: client.WithFieldOwner(tClient, t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" @@ -740,7 +740,7 @@ func TestReconcileRepoHostRBAC(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 0) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Reader: tClient, Writer: client.WithFieldOwner(tClient, t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" @@ -807,9 +807,7 @@ func TestReconcileStanzaCreate(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -928,7 +926,7 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Writer: client.WithFieldOwner(tClient, t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" @@ -1089,9 +1087,8 @@ func 
TestReconcileManualBackup(t *testing.T) { r := &Reconciler{} _, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(mgr.GetClient(), controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -1527,7 +1524,10 @@ func TestGetPGBackRestResources(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } clusterName := "hippocluster" clusterUID := "hippouid" @@ -1839,9 +1839,9 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: tClient, + Reader: tClient, Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(tClient, controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -2218,9 +2218,9 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: tClient, + Reader: tClient, Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(tClient, controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -2394,7 +2394,10 @@ func TestCopyConfigurationResources(t *testing.T) { ctx := context.Background() require.ParallelCapacity(t, 2) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } ns1 := setupNamespace(t, tClient) ns2 := setupNamespace(t, tClient) @@ -2643,8 +2646,7 @@ func TestGenerateBackupJobIntent(t *testing.T) { ns := setupNamespace(t, cc) r := &Reconciler{ - Client: cc, - Owner: controllerName, + Reader: cc, } ctx := context.Background() @@ -3439,11 +3441,8 @@ volumes: } func TestGenerateRepoHostIntent(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - ctx := context.Background() - r := Reconciler{Client: cc} + r := Reconciler{} t.Run("empty", func(t *testing.T) { _, err := r.generateRepoHostIntent(ctx, &v1beta1.PostgresCluster{}, "", &RepoResources{}, @@ -3529,12 +3528,7 @@ func TestGenerateRepoHostIntent(t *testing.T) { } func TestGenerateRestoreJobIntent(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - r := Reconciler{ - Client: cc, - } + r := Reconciler{} t.Run("empty", func(t *testing.T) { err := r.generateRestoreJobIntent(&v1beta1.PostgresCluster{}, "", "", @@ -3756,7 +3750,7 @@ func TestObserveRestoreEnv(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Reader: tClient} namespace := setupNamespace(t, tClient).Name generateJob := func(clusterName string, completed, failed *bool) *batchv1.Job { @@ -3856,18 +3850,18 @@ func TestObserveRestoreEnv(t *testing.T) { fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) fakeLeaderEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) + assert.NilError(t, tClient.Create(ctx, fakeLeaderEP)) fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = 
naming.PatroniDistributedConfiguration(cluster) fakeDCSEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) + assert.NilError(t, tClient.Create(ctx, fakeDCSEP)) fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) fakeFailoverEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) + assert.NilError(t, tClient.Create(ctx, fakeFailoverEP)) job := generateJob(cluster.Name, initialize.Bool(false), initialize.Bool(false)) - assert.NilError(t, r.Client.Create(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job)) }, result: testResult{ foundRestoreJob: true, @@ -3880,15 +3874,15 @@ func TestObserveRestoreEnv(t *testing.T) { fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) fakeLeaderEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) + assert.NilError(t, tClient.Create(ctx, fakeLeaderEP)) fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) fakeDCSEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) + assert.NilError(t, tClient.Create(ctx, fakeDCSEP)) fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) fakeFailoverEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) + assert.NilError(t, tClient.Create(ctx, fakeFailoverEP)) }, result: testResult{ foundRestoreJob: false, @@ -3899,7 +3893,7 @@ func TestObserveRestoreEnv(t *testing.T) { desc: "restore job only exists", createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) { job := generateJob(cluster.Name, initialize.Bool(false), initialize.Bool(false)) - assert.NilError(t, r.Client.Create(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job)) }, result: testResult{ foundRestoreJob: true, @@ -3913,8 +3907,8 @@ func TestObserveRestoreEnv(t *testing.T) { t.Skip("requires mocking of Job conditions") } job := generateJob(cluster.Name, initialize.Bool(true), nil) - assert.NilError(t, r.Client.Create(ctx, job.DeepCopy())) - assert.NilError(t, r.Client.Status().Update(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job.DeepCopy())) + assert.NilError(t, tClient.Status().Update(ctx, job)) }, result: testResult{ foundRestoreJob: true, @@ -3933,8 +3927,8 @@ func TestObserveRestoreEnv(t *testing.T) { t.Skip("requires mocking of Job conditions") } job := generateJob(cluster.Name, nil, initialize.Bool(true)) - assert.NilError(t, r.Client.Create(ctx, job.DeepCopy())) - assert.NilError(t, r.Client.Status().Update(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job.DeepCopy())) + assert.NilError(t, tClient.Status().Update(ctx, job)) }, result: testResult{ foundRestoreJob: true, @@ -3984,7 +3978,9 @@ func TestPrepareForRestore(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Writer: client.WithFieldOwner(tClient, t.Name()), + } namespace := setupNamespace(t, tClient).Name generateJob := func(clusterName string) *batchv1.Job { @@ -4038,7 +4034,7 @@ func TestPrepareForRestore(t *testing.T) { createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) (*batchv1.Job, []corev1.Endpoints) { job := generateJob(cluster.Name) - assert.NilError(t, r.Client.Create(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job)) return job, nil }, result: testResult{ @@ -4058,15 +4054,15 @@ func 
TestPrepareForRestore(t *testing.T) { fakeLeaderEP := corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) fakeLeaderEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, &fakeLeaderEP)) + assert.NilError(t, tClient.Create(ctx, &fakeLeaderEP)) fakeDCSEP := corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) fakeDCSEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, &fakeDCSEP)) + assert.NilError(t, tClient.Create(ctx, &fakeDCSEP)) fakeFailoverEP := corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) fakeFailoverEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, &fakeFailoverEP)) + assert.NilError(t, tClient.Create(ctx, &fakeFailoverEP)) return nil, []corev1.Endpoints{fakeLeaderEP, fakeDCSEP, fakeFailoverEP} }, result: testResult{ @@ -4173,19 +4169,19 @@ func TestPrepareForRestore(t *testing.T) { leaderEP, dcsEP, failoverEP := corev1.Endpoints{}, corev1.Endpoints{}, corev1.Endpoints{} currentEndpoints := []corev1.Endpoints{} - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), + if err := tClient.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), &leaderEP); err != nil { assert.NilError(t, client.IgnoreNotFound(err)) } else { currentEndpoints = append(currentEndpoints, leaderEP) } - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)), + if err := tClient.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)), &dcsEP); err != nil { assert.NilError(t, client.IgnoreNotFound(err)) } else { currentEndpoints = append(currentEndpoints, dcsEP) } - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)), + if err := tClient.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)), &failoverEP); err != nil { assert.NilError(t, client.IgnoreNotFound(err)) } else { @@ -4193,7 +4189,7 @@ func TestPrepareForRestore(t *testing.T) { } restoreJobs := &batchv1.JobList{} - assert.NilError(t, r.Client.List(ctx, restoreJobs, &client.ListOptions{ + assert.NilError(t, tClient.List(ctx, restoreJobs, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: naming.PGBackRestRestoreJobSelector(cluster.GetName()), })) @@ -4229,9 +4225,9 @@ func TestReconcileScheduledBackups(t *testing.T) { r := &Reconciler{} _, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), + Reader: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(mgr.GetClient(), controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -4492,7 +4488,7 @@ func TestSetScheduledJobStatus(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 0) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Reader: tClient} clusterName := "hippocluster" clusterUID := "hippouid" @@ -4565,9 +4561,9 @@ func TestBackupsEnabled(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), + Reader: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(mgr.GetClient(), controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -4723,8 +4719,7 @@ func TestGetRepoHostVolumeRequests(t *testing.T) { require.ParallelCapacity(t, 1) 
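
The fixture change here repeats across these test files: instead of setting Reconciler.Owner, each test wraps the shared test client with client.WithFieldOwner(cc, t.Name()), so every write the reconciler performs is attributed to the test's own name and managed-field assertions can match on t.Name(). A sketch of that wiring with a stand-in struct, not the operator's Reconciler:

    package sketch

    import (
        "testing"

        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // fixture stands in for the reconciler under test: reads go straight to
    // the test client, writes are stamped with the test's name.
    type fixture struct {
        Reader client.Reader
        Writer client.Writer
    }

    func newFixture(t *testing.T, cc client.Client) *fixture {
        t.Helper()
        return &fixture{
            Reader: cc,
            Writer: client.WithFieldOwner(cc, t.Name()),
        }
    }
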
reconciler := &Reconciler{ - Client: tClient, - Owner: client.FieldOwner(t.Name()), + Reader: tClient, Recorder: new(record.FakeRecorder), } diff --git a/internal/controller/postgrescluster/pgbouncer.go b/internal/controller/postgrescluster/pgbouncer.go index 8b74f20a6..56203189d 100644 --- a/internal/controller/postgrescluster/pgbouncer.go +++ b/internal/controller/postgrescluster/pgbouncer.go @@ -97,7 +97,7 @@ func (r *Reconciler) reconcilePGBouncerConfigMap( // PgBouncer is disabled; delete the ConfigMap if it exists. Check the // client cache first using Get. key := client.ObjectKeyFromObject(configmap) - err := errors.WithStack(r.Client.Get(ctx, key, configmap)) + err := errors.WithStack(r.Reader.Get(ctx, key, configmap)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, configmap)) } @@ -230,7 +230,7 @@ func (r *Reconciler) reconcilePGBouncerSecret( ) (*corev1.Secret, error) { existing := &corev1.Secret{ObjectMeta: naming.ClusterPGBouncer(cluster)} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if client.IgnoreNotFound(err) != nil { return nil, err } @@ -374,7 +374,7 @@ func (r *Reconciler) reconcilePGBouncerService( // PgBouncer is disabled; delete the Service if it exists. Check the client // cache first using Get. key := client.ObjectKeyFromObject(service) - err := errors.WithStack(r.Client.Get(ctx, key, service)) + err := errors.WithStack(r.Reader.Get(ctx, key, service)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, service)) } @@ -565,7 +565,7 @@ func (r *Reconciler) reconcilePGBouncerDeployment( // PgBouncer is disabled; delete the Deployment if it exists. Check the // client cache first using Get. 
key := client.ObjectKeyFromObject(deploy) - err := errors.WithStack(r.Client.Get(ctx, key, deploy)) + err := errors.WithStack(r.Reader.Get(ctx, key, deploy)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, deploy)) } @@ -590,7 +590,7 @@ func (r *Reconciler) reconcilePGBouncerPodDisruptionBudget( ) error { deleteExistingPDB := func(cluster *v1beta1.PostgresCluster) error { existing := &policyv1.PodDisruptionBudget{ObjectMeta: naming.ClusterPGBouncer(cluster)} - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, existing)) } diff --git a/internal/controller/postgrescluster/pgbouncer_test.go b/internal/controller/postgrescluster/pgbouncer_test.go index 26f6637ea..78527131e 100644 --- a/internal/controller/postgrescluster/pgbouncer_test.go +++ b/internal/controller/postgrescluster/pgbouncer_test.go @@ -27,11 +27,7 @@ import ( ) func TestGeneratePGBouncerService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - reconciler := &Reconciler{ - Client: cc, Recorder: new(record.FakeRecorder), } @@ -263,7 +259,10 @@ func TestReconcilePGBouncerService(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -365,11 +364,8 @@ func TestReconcilePGBouncerService(t *testing.T) { } func TestGeneratePGBouncerDeployment(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - ctx := context.Background() - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns3" @@ -548,15 +544,15 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { require.ParallelCapacity(t, 0) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } foundPDB := func( cluster *v1beta1.PostgresCluster, ) bool { got := &policyv1.PodDisruptionBudget{} - err := r.Client.Get(ctx, + err := cc.Get(ctx, naming.AsObjectKey(naming.ClusterPGBouncer(cluster)), got) return !apierrors.IsNotFound(err) @@ -595,8 +591,8 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) assert.Assert(t, foundPDB(cluster)) @@ -622,8 +618,8 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("50%")) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, 
r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) assert.Assert(t, foundPDB(cluster)) diff --git a/internal/controller/postgrescluster/pgmonitor.go b/internal/controller/postgrescluster/pgmonitor.go index 9a6043f86..e30bf3f56 100644 --- a/internal/controller/postgrescluster/pgmonitor.go +++ b/internal/controller/postgrescluster/pgmonitor.go @@ -153,7 +153,7 @@ func (r *Reconciler) reconcileMonitoringSecret( existing := &corev1.Secret{ObjectMeta: naming.MonitoringUserSecret(cluster)} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if client.IgnoreNotFound(err) != nil { return nil, err } @@ -380,7 +380,7 @@ func (r *Reconciler) reconcileExporterWebConfig(ctx context.Context, } existing := &corev1.ConfigMap{ObjectMeta: naming.ExporterWebConfigMap(cluster)} - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if client.IgnoreNotFound(err) != nil { return nil, err } @@ -439,7 +439,7 @@ func (r *Reconciler) reconcileExporterQueriesConfig(ctx context.Context, cluster *v1beta1.PostgresCluster) (*corev1.ConfigMap, error) { existing := &corev1.ConfigMap{ObjectMeta: naming.ExporterQueriesConfigMap(cluster)} - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if client.IgnoreNotFound(err) != nil { return nil, err } diff --git a/internal/controller/postgrescluster/pgmonitor_test.go b/internal/controller/postgrescluster/pgmonitor_test.go index e4ccaf0d9..e91b176ec 100644 --- a/internal/controller/postgrescluster/pgmonitor_test.go +++ b/internal/controller/postgrescluster/pgmonitor_test.go @@ -702,7 +702,10 @@ func TestReconcileMonitoringSecret(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 0) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } cluster := testCluster() cluster.Default() @@ -776,7 +779,10 @@ func TestReconcileExporterQueriesConfig(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 0) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } cluster := testCluster() cluster.Default() diff --git a/internal/controller/postgrescluster/pki.go b/internal/controller/postgrescluster/pki.go index d52d6a75d..0e686d4f7 100644 --- a/internal/controller/postgrescluster/pki.go +++ b/internal/controller/postgrescluster/pki.go @@ -42,7 +42,7 @@ func (r *Reconciler) reconcileRootCertificate( existing := &corev1.Secret{} existing.Namespace, existing.Name = cluster.Namespace, naming.RootCertSecret err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) root := &pki.RootCertificateAuthority{} @@ -120,7 +120,7 @@ func (r *Reconciler) reconcileClusterCertificate( existing := &corev1.Secret{ObjectMeta: naming.PostgresTLSSecret(cluster)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) leaf := 
&pki.LeafCertificate{} dnsNames := append(naming.ServiceDNSNames(ctx, primaryService), naming.ServiceDNSNames(ctx, replicaService)...) diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go index ed74b1220..b61e98325 100644 --- a/internal/controller/postgrescluster/pki_test.go +++ b/internal/controller/postgrescluster/pki_test.go @@ -42,8 +42,8 @@ func TestReconcileCerts(t *testing.T) { namespace := setupNamespace(t, tClient).Name r := &Reconciler{ - Client: tClient, - Owner: controllerName, + Reader: tClient, + Writer: client.WithFieldOwner(tClient, controllerName), } // set up cluster1 diff --git a/internal/controller/postgrescluster/pod_disruption_budget_test.go b/internal/controller/postgrescluster/pod_disruption_budget_test.go index 6463068d4..e8cbffc19 100644 --- a/internal/controller/postgrescluster/pod_disruption_budget_test.go +++ b/internal/controller/postgrescluster/pod_disruption_budget_test.go @@ -13,14 +13,11 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "github.com/crunchydata/postgres-operator/internal/initialize" - "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestGeneratePodDisruptionBudget(t *testing.T) { - _, cc := setupKubernetes(t) - r := &Reconciler{Client: cc} - require.ParallelCapacity(t, 0) + r := &Reconciler{} var ( minAvailable *intstr.IntOrString diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index d45e94403..beaec3cdf 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -542,7 +542,7 @@ func (r *Reconciler) reconcilePostgresUserSecrets( selector, err := naming.AsSelector(naming.ClusterPostgresUsers(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, secrets, + r.Reader.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -898,7 +898,7 @@ func (r *Reconciler) reconcilePostgresWALVolume( // No WAL volume is specified; delete the PVC safely if it exists. Check // the client cache first using Get. 
key := client.ObjectKeyFromObject(pvc) - err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + err := errors.WithStack(r.Reader.Get(ctx, key, pvc)) if err != nil { return nil, client.IgnoreNotFound(err) } @@ -1003,7 +1003,7 @@ func (r *Reconciler) reconcileDatabaseInitSQL(ctx context.Context, Namespace: cluster.Namespace, }, } - err := r.Client.Get(ctx, client.ObjectKeyFromObject(cm), cm) + err := r.Reader.Get(ctx, client.ObjectKeyFromObject(cm), cm) if err != nil { return "", err } diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index 48591d8d4..7754f73c4 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -283,10 +283,7 @@ func TestGeneratePostgresParameters(t *testing.T) { } func TestGeneratePostgresUserSecret(t *testing.T) { - _, tClient := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: tClient} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns1" @@ -484,8 +481,8 @@ func TestReconcilePostgresVolumes(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &Reconciler{ - Client: tClient, - Owner: client.FieldOwner(t.Name()), + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), } t.Run("DataVolumeNoSourceCluster", func(t *testing.T) { @@ -588,7 +585,7 @@ volumeMode: Filesystem assert.NilError(t, err) // Get snapshot and update Status.ReadyToUse and CreationTime - err = reconciler.Client.Get(ctx, client.ObjectKeyFromObject(snapshot), snapshot) + err = tClient.Get(ctx, client.ObjectKeyFromObject(snapshot), snapshot) assert.NilError(t, err) currentTime := metav1.Now() @@ -596,7 +593,7 @@ volumeMode: Filesystem ReadyToUse: initialize.Bool(true), CreationTime: ¤tTime, } - err = reconciler.Client.Status().Update(ctx, snapshot) + err = tClient.Status().Update(ctx, snapshot) assert.NilError(t, err) // Reconcile volume @@ -861,7 +858,7 @@ func TestReconcileDatabaseInitSQL(t *testing.T) { require.ParallelCapacity(t, 0) r := &Reconciler{ - Client: client, + Reader: client, // Overwrite the PodExec function with a check to ensure the exec // call would have been made @@ -985,7 +982,7 @@ func TestReconcileDatabaseInitSQLConfigMap(t *testing.T) { require.ParallelCapacity(t, 0) r := &Reconciler{ - Client: client, + Reader: client, // Overwrite the PodExec function with a check to ensure the exec // call would have been made diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go index a16bd650f..74e506f45 100644 --- a/internal/controller/postgrescluster/snapshots.go +++ b/internal/controller/postgrescluster/snapshots.go @@ -196,7 +196,7 @@ func (r *Reconciler) reconcileDedicatedSnapshotVolume( // Check the client cache first using Get. 
if cluster.Spec.Backups.Snapshots == nil { key := client.ObjectKeyFromObject(pvc) - err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + err := errors.WithStack(r.Reader.Get(ctx, key, pvc)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc)) } @@ -263,13 +263,13 @@ func (r *Reconciler) reconcileDedicatedSnapshotVolume( patch := client.RawPatch(client.Merge.Type(), []byte(annotations)) err = r.handlePersistentVolumeClaimError(cluster, - errors.WithStack(r.patch(ctx, pvc, patch))) + errors.WithStack(r.Writer.Patch(ctx, pvc, patch))) if err != nil { return pvc, err } - err = r.Client.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground)) + err = r.Writer.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground)) return pvc, errors.WithStack(err) } @@ -459,7 +459,7 @@ func (r *Reconciler) getDedicatedSnapshotVolumeRestoreJob(ctx context.Context, selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(postgrescluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, jobs, + r.Reader.List(ctx, jobs, client.InNamespace(postgrescluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -489,7 +489,7 @@ func (r *Reconciler) getLatestCompleteBackupJob(ctx context.Context, selectJobs, err := naming.AsSelector(naming.ClusterBackupJobs(postgrescluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, jobs, + r.Reader.List(ctx, jobs, client.InNamespace(postgrescluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -555,7 +555,7 @@ func (r *Reconciler) getSnapshotsForCluster(ctx context.Context, cluster *v1beta } snapshots := &volumesnapshotv1.VolumeSnapshotList{} err = errors.WithStack( - r.Client.List(ctx, snapshots, + r.Reader.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go index af5d4d124..83efcad70 100644 --- a/internal/controller/postgrescluster/snapshots_test.go +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -39,9 +39,9 @@ func TestReconcileVolumeSnapshots(t *testing.T) { recorder := events.NewRecorder(t, runtime.Scheme) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, Recorder: recorder, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -60,8 +60,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.UID = "the-uid-123" - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create a snapshot pvc := &corev1.PersistentVolumeClaim{ @@ -72,14 +72,14 @@ func TestReconcileVolumeSnapshots(t *testing.T) { volumeSnapshotClassName := "my-snapshotclass" snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) assert.NilError(t, err) - assert.NilError(t, r.Client.Create(ctx, snapshot)) + assert.NilError(t, cc.Create(ctx, snapshot)) // Get all snapshots for this cluster and assert 1 exists selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, 
snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -91,7 +91,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { // Get all snapshots for this cluster and assert 0 exist snapshots = &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -147,8 +147,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, DeletionPolicy: "Delete", } - assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + assert.NilError(t, cc.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, volumeSnapshotClass)) }) // Create a cluster with snapshots enabled cluster := testCluster() @@ -156,8 +156,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create pvc for reconcile pvc := &corev1.PersistentVolumeClaim{ @@ -174,7 +174,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -193,8 +193,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, DeletionPolicy: "Delete", } - assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + assert.NilError(t, cc.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, volumeSnapshotClass)) }) // Create a cluster with snapshots enabled cluster := testCluster() @@ -203,8 +203,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create pvc with annotation pvcName := initialize.String("dedicated-snapshot-volume") @@ -240,14 +240,14 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, } assert.NilError(t, r.setControllerReference(cluster, snapshot1)) - assert.NilError(t, r.Client.Create(ctx, snapshot1)) + assert.NilError(t, cc.Create(ctx, snapshot1)) // Update snapshot status truePtr := initialize.Bool(true) snapshot1.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - assert.NilError(t, r.Client.Status().Update(ctx, snapshot1)) + assert.NilError(t, cc.Status().Update(ctx, snapshot1)) // Create second snapshot with different annotation value snapshot2 := &volumesnapshotv1.VolumeSnapshot{ @@ -272,13 +272,13 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, } assert.NilError(t, r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, r.Client.Create(ctx, snapshot2)) + assert.NilError(t, cc.Create(ctx, snapshot2)) // Update second snapshot's 
status snapshot2.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - assert.NilError(t, r.Client.Status().Update(ctx, snapshot2)) + assert.NilError(t, cc.Status().Update(ctx, snapshot2)) // Reconcile assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) @@ -288,7 +288,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -308,8 +308,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, DeletionPolicy: "Delete", } - assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + assert.NilError(t, cc.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, volumeSnapshotClass)) }) // Create a cluster with snapshots enabled cluster := testCluster() @@ -318,8 +318,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create pvc with annotation pvcName := initialize.String("dedicated-snapshot-volume") @@ -340,7 +340,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -356,9 +356,9 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { recorder := events.NewRecorder(t, runtime.Scheme) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, Recorder: recorder, + Writer: client.WithFieldOwner(cc, t.Name()), } // Enable snapshots feature gate @@ -374,8 +374,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.UID = "the-uid-123" - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create a dedicated snapshot volume pvc := &corev1.PersistentVolumeClaim{ @@ -396,14 +396,14 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { spec := testVolumeClaimSpec() pvc.Spec = spec.AsPersistentVolumeClaimSpec() assert.NilError(t, r.setControllerReference(cluster, pvc)) - assert.NilError(t, r.Client.Create(ctx, pvc)) + assert.NilError(t, cc.Create(ctx, pvc)) // Assert that the pvc was created selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} assert.NilError(t, - r.Client.List(ctx, pvcs, + cc.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) @@ -419,7 +419,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Assert that the pvc has been deleted or marked for deletion key, fetched := client.ObjectKeyFromObject(pvc), &corev1.PersistentVolumeClaim{} - if err := 
r.Client.Get(ctx, key, fetched); err == nil { + if err := cc.Get(ctx, key, fetched); err == nil { assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted") } else { assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err) @@ -435,8 +435,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create volumes for reconcile clusterVolumes := []*corev1.PersistentVolumeClaim{} @@ -451,7 +451,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} assert.NilError(t, - r.Client.List(ctx, pvcs, + cc.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) @@ -470,18 +470,18 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create successful backup job backupJob := testBackupJob(cluster) assert.NilError(t, r.setControllerReference(cluster, backupJob)) - assert.NilError(t, r.Client.Create(ctx, backupJob)) + assert.NilError(t, cc.Create(ctx, backupJob)) currentTime := metav1.Now() startTime := metav1.NewTime(currentTime.AddDate(0, 0, -1)) backupJob.Status = succeededJobStatus(startTime, currentTime) - assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) + assert.NilError(t, cc.Status().Update(ctx, backupJob)) // Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -498,7 +498,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) assert.NilError(t, - r.Client.List(ctx, restoreJobs, + cc.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -518,8 +518,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create times for jobs currentTime := metav1.Now() @@ -530,10 +530,10 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) assert.NilError(t, r.setControllerReference(cluster, backupJob)) - assert.NilError(t, r.Client.Create(ctx, backupJob)) + assert.NilError(t, cc.Create(ctx, backupJob)) backupJob.Status = succeededJobStatus(earlierStartTime, earlierTime) - assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) + assert.NilError(t, cc.Status().Update(ctx, backupJob)) // Create successful restore job restoreJob := testRestoreJob(cluster) @@ -541,10 +541,10 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { 
naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } assert.NilError(t, r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, r.Client.Create(ctx, restoreJob)) + assert.NilError(t, cc.Create(ctx, restoreJob)) restoreJob.Status = succeededJobStatus(currentStartTime, currentTime) - assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) + assert.NilError(t, cc.Status().Update(ctx, restoreJob)) // Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -561,7 +561,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) assert.NilError(t, - r.Client.List(ctx, restoreJobs, + cc.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -583,8 +583,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create times for jobs currentTime := metav1.Now() @@ -594,10 +594,10 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) assert.NilError(t, r.setControllerReference(cluster, backupJob)) - assert.NilError(t, r.Client.Create(ctx, backupJob)) + assert.NilError(t, cc.Create(ctx, backupJob)) backupJob.Status = succeededJobStatus(startTime, earlierTime) - assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) + assert.NilError(t, cc.Status().Update(ctx, backupJob)) // Create failed restore job restoreJob := testRestoreJob(cluster) @@ -605,13 +605,13 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } assert.NilError(t, r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, r.Client.Create(ctx, restoreJob)) + assert.NilError(t, cc.Create(ctx, restoreJob)) restoreJob.Status = batchv1.JobStatus{ Succeeded: 0, Failed: 1, } - assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) + assert.NilError(t, cc.Status().Update(ctx, restoreJob)) // Setup instances and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -639,8 +639,7 @@ func TestCreateDedicatedSnapshotVolume(t *testing.T) { _, cc := setupKubernetes(t) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -667,8 +666,7 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) { _, cc := setupKubernetes(t) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -695,7 +693,7 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) { selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) assert.NilError(t, - r.Client.List(ctx, jobs, + cc.List(ctx, jobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -709,8 +707,7 @@ func TestGenerateSnapshotOfDedicatedSnapshotVolume(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: 
client.FieldOwner(t.Name()), + Reader: cc, } ns := setupNamespace(t, cc) @@ -740,8 +737,7 @@ func TestGenerateVolumeSnapshot(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, } ns := setupNamespace(t, cc) @@ -769,8 +765,8 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -787,7 +783,7 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { job1 := testRestoreJob(cluster) job1.Namespace = ns.Name - err := r.Client.Create(ctx, job1) + err := cc.Create(ctx, job1) assert.NilError(t, err) dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) @@ -803,14 +799,14 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { naming.PGBackRestBackupJobCompletion: "backup-timestamp", } - err := r.Client.Create(ctx, job2) + err := cc.Create(ctx, job2) assert.NilError(t, err) job3 := testRestoreJob(cluster) job3.Name = "restore-job-3" job3.Namespace = ns.Name - assert.NilError(t, r.Client.Create(ctx, job3)) + assert.NilError(t, cc.Create(ctx, job3)) dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) assert.NilError(t, err) @@ -824,8 +820,8 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { _, cc := setupKubernetes(t) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -842,7 +838,7 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { job1 := testBackupJob(cluster) job1.Namespace = ns.Name - err := r.Client.Create(ctx, job1) + err := cc.Create(ctx, job1) assert.NilError(t, err) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) @@ -867,13 +863,13 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { job2.Namespace = ns.Name job2.Name = "backup-job-2" - assert.NilError(t, r.Client.Create(ctx, job2)) + assert.NilError(t, cc.Create(ctx, job2)) // Get job1 and update Status. - assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) + assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(job1), job1)) job1.Status = succeededJobStatus(currentStartTime, currentTime) - assert.NilError(t, r.Client.Status().Update(ctx, job1)) + assert.NilError(t, cc.Status().Update(ctx, job1)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -903,16 +899,16 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { assert.NilError(t, r.apply(ctx, job2)) // Get job1 and update Status. - assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) + assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(job1), job1)) job1.Status = succeededJobStatus(currentStartTime, currentTime) - assert.NilError(t, r.Client.Status().Update(ctx, job1)) + assert.NilError(t, cc.Status().Update(ctx, job1)) // Get job2 and update Status. 
- assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2)) + assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(job2), job2)) job2.Status = succeededJobStatus(earlierStartTime, earlierTime) - assert.NilError(t, r.Client.Status().Update(ctx, job2)) + assert.NilError(t, cc.Status().Update(ctx, job2)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -1024,8 +1020,8 @@ func TestGetSnapshotsForCluster(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -1054,7 +1050,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") - assert.NilError(t, r.Client.Create(ctx, snapshot)) + assert.NilError(t, cc.Create(ctx, snapshot)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1095,7 +1091,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") - assert.NilError(t, r.Client.Create(ctx, snapshot2)) + assert.NilError(t, cc.Create(ctx, snapshot2)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1242,25 +1238,24 @@ func TestDeleteSnapshots(t *testing.T) { _, cc := setupKubernetes(t) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name cluster.UID = "the-uid-123" - assert.NilError(t, r.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) rhinoCluster := testCluster() rhinoCluster.Name = "rhino" rhinoCluster.Namespace = ns.Name rhinoCluster.UID = "the-uid-456" - assert.NilError(t, r.Client.Create(ctx, rhinoCluster)) + assert.NilError(t, cc.Create(ctx, rhinoCluster)) t.Cleanup(func() { - assert.Check(t, r.Client.Delete(ctx, cluster)) - assert.Check(t, r.Client.Delete(ctx, rhinoCluster)) + assert.Check(t, cc.Delete(ctx, cluster)) + assert.Check(t, cc.Delete(ctx, rhinoCluster)) }) t.Run("NoSnapshots", func(t *testing.T) { @@ -1287,7 +1282,7 @@ func TestDeleteSnapshots(t *testing.T) { }, } assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1)) - assert.NilError(t, r.Client.Create(ctx, snapshot1)) + assert.NilError(t, cc.Create(ctx, snapshot1)) snapshots := []*volumesnapshotv1.VolumeSnapshot{ snapshot1, @@ -1295,7 +1290,7 @@ func TestDeleteSnapshots(t *testing.T) { assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshots)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, existingSnapshots, + cc.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) assert.Equal(t, len(existingSnapshots.Items), 1) @@ -1337,7 +1332,7 @@ func TestDeleteSnapshots(t *testing.T) { }, } assert.NilError(t, r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, r.Client.Create(ctx, snapshot2)) + assert.NilError(t, cc.Create(ctx, snapshot2)) snapshots := []*volumesnapshotv1.VolumeSnapshot{ snapshot1, snapshot2, @@ -1345,7 +1340,7 @@ func TestDeleteSnapshots(t *testing.T) { assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshots)) existingSnapshots := 
&volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, existingSnapshots, + cc.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) assert.Equal(t, len(existingSnapshots.Items), 1) diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index a26fa05e7..93c8ded14 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -18,7 +18,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/initialize" @@ -41,7 +40,7 @@ func (r *Reconciler) observePersistentVolumeClaims( selector, err := naming.AsSelector(naming.Cluster(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, volumes, + r.Reader.List(ctx, volumes, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -392,7 +391,7 @@ func (r *Reconciler) reconcileDirMoveJobs(ctx context.Context, cluster.Spec.DataSource.Volumes != nil { var list batchv1.JobList - if err := r.Client.List(ctx, &list, &client.ListOptions{ + if err := r.Reader.List(ctx, &list, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: naming.DirectoryMoveJobLabels(cluster.Name).AsSelector(), }); err != nil { @@ -547,8 +546,7 @@ func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, // set gvk and ownership refs moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) - if err := controllerutil.SetControllerReference(cluster, moveDirJob, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(cluster, moveDirJob); err != nil { return true, errors.WithStack(err) } @@ -666,8 +664,7 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, // set gvk and ownership refs moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) - if err := controllerutil.SetControllerReference(cluster, moveDirJob, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(cluster, moveDirJob); err != nil { return true, errors.WithStack(err) } @@ -788,8 +785,7 @@ func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context, // set gvk and ownership refs moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) - if err := controllerutil.SetControllerReference(cluster, moveDirJob, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(cluster, moveDirJob); err != nil { return true, errors.WithStack(err) } diff --git a/internal/controller/postgrescluster/volumes_test.go b/internal/controller/postgrescluster/volumes_test.go index c579e3f57..2f6db3198 100644 --- a/internal/controller/postgrescluster/volumes_test.go +++ b/internal/controller/postgrescluster/volumes_test.go @@ -375,7 +375,10 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } ns := setupNamespace(t, tClient) cluster := &v1beta1.PostgresCluster{ @@ -637,7 +640,10 @@ func TestReconcileMoveDirectories(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := 
&Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{
+		Reader: tClient,
+		Writer: client.WithFieldOwner(tClient, t.Name()),
+	}
 
 	ns := setupNamespace(t, tClient)
 	cluster := &v1beta1.PostgresCluster{
@@ -732,7 +738,7 @@ func TestReconcileMoveDirectories(t *testing.T) {
 	assert.Assert(t, returnEarly)
 
 	moveJobs := &batchv1.JobList{}
-	err = r.Client.List(ctx, moveJobs, &client.ListOptions{
+	err = tClient.List(ctx, moveJobs, &client.ListOptions{
 		Namespace:     cluster.Namespace,
 		LabelSelector: naming.DirectoryMoveJobLabels(cluster.Name).AsSelector(),
 	})

From a6ed5706bf34b1e4634ee2a91606fdd6ee744fc7 Mon Sep 17 00:00:00 2001
From: jmckulk
Date: Fri, 19 Sep 2025 15:14:01 -0400
Subject: [PATCH 11/43] Run Shellcheck on instance reload command

Shellcheck was being run on the reload command for pgBackRest. During
the auto-grow work, we noticed that the reload command for the instance
was not being checked. Since we had to update the auto-grow bash logic
to pass Shellcheck for pgBackRest, it seemed appropriate to enable the
checks for the instance as well.
---
 internal/postgres/config.go         | 35 +++++++++++++++--------
 internal/postgres/config_test.go    | 43 +++++++++++++++++++++++++++++
 internal/postgres/reconcile_test.go | 24 ++++++++--------
 3 files changed, 80 insertions(+), 22 deletions(-)

diff --git a/internal/postgres/config.go b/internal/postgres/config.go
index 0300d4d34..75371e6af 100644
--- a/internal/postgres/config.go
+++ b/internal/postgres/config.go
@@ -298,13 +298,24 @@ func reloadCommand(
 	// descriptor gets closed and reopened to use the builtin `[ -nt` to check
 	// mtimes.
 	// - https://unix.stackexchange.com/a/407383
+	//
+	// In the manageAutogrowAnnotation function below, df reports the relevant
+	// volume size in Mebibytes. The 'size' variable takes its value from the
+	// '1M-blocks' output (second column) and the 'use' variable takes its value
+	// from the 'Use%' column (fifth column). Each value is read after stripping
+	// the column headers (everything before the first '\n') and splitting the
+	// remaining line on whitespace with the 'read -r' command. The underscores
+	// (_) discard fields while the named variables store them, which allows
+	// selective parsing of the provided lines. The percent value is stripped of
+	// its '%' and then used to decide whether an expansion should be triggered
+	// by setting the calculated volume size in the 'size' variable.
 	script := fmt.Sprintf(`
 # Parameters for curl when managing autogrow annotation.
 APISERVER="https://kubernetes.default.svc"
 SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount"
-NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace)
-TOKEN=$(cat ${SERVICEACCOUNT}/token)
-CACERT=${SERVICEACCOUNT}/ca.crt
+NAMESPACE=$(cat "${SERVICEACCOUNT}/namespace")
+TOKEN=$(cat "${SERVICEACCOUNT}/token")
+CACERT="${SERVICEACCOUNT}/ca.crt"
 
 # Manage autogrow annotation.
 # Return size in Mebibytes.
 manageAutogrowAnnotation() {
 	local volume=$1
 	local trigger=$2
 	local maxGrow=$3
 
-	size=$(df --block-size=M /"${volume}" | awk 'FNR == 2 {print $2}')
-	use=$(df /"${volume}" | awk 'FNR == 2 {print $5}')
+	size=$(df --block-size=M /"${volume}")
+	read -r _ size _ <<< "${size#*$'\n'}"
+	use=$(df /"${volume}")
+	read -r _ _ _ _ use _ <<< "${use#*$'\n'}"
 	sizeInt="${size//M/}"
 	# Use the sed punctuation class, because the shell will not accept the percent sign in an expansion.
- useInt=$(echo $use | sed 's/[[:punct:]]//g') + useInt=${use//[[:punct:]]/} triggerExpansion="$((useInt > trigger))" - if [[ $triggerExpansion -eq 1 ]]; then + if [[ ${triggerExpansion} -eq 1 ]]; then newSize="$(((sizeInt / 2)+sizeInt))" # Only compare with maxGrow if it is set (not empty) - if [[ -n "$maxGrow" ]]; then + if [[ -n "${maxGrow}" ]]; then # check to see how much we would normally grow sizeDiff=$((newSize - sizeInt)) # Compare the size difference to the maxGrow; if it is greater, cap it to maxGrow - if [[ $sizeDiff -gt $maxGrow ]]; then + if [[ ${sizeDiff} -gt ${maxGrow} ]]; then newSize=$((sizeInt + maxGrow)) fi fi newSizeMi="${newSize}Mi" - d='[{"op": "add", "path": "/metadata/annotations/suggested-'"${volume}"'-pvc-size", "value": "'"$newSizeMi"'"}]' - curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + d='[{"op": "add", "path": "/metadata/annotations/suggested-'"${volume}"'-pvc-size", "value": "'"${newSizeMi}"'"}]' + curl --cacert "${CACERT}" --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "${d}" fi } diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index 53bcfc1bf..124596933 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -18,9 +18,11 @@ import ( "time" "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -600,3 +602,44 @@ EOF chmod +x /tmp/pg_rewind_tde.sh`)) }) } + +func TestReloadCommand(t *testing.T) { + shellcheck := require.ShellCheck(t) + + pgdataSize := resource.MustParse("1Gi") + pgwalSize := resource.MustParse("2Gi") + + command := reloadCommand( + "some-name", + &v1beta1.VolumeClaimSpecWithAutoGrow{ + AutoGrow: &v1beta1.AutoGrowSpec{ + Trigger: initialize.Int32(10), + MaxGrow: &pgdataSize, + }, + }, + &v1beta1.VolumeClaimSpecWithAutoGrow{ + AutoGrow: &v1beta1.AutoGrowSpec{ + Trigger: initialize.Int32(20), + MaxGrow: &pgwalSize, + }, + }, + ) + + // Expect a bash command with an inline script. + assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) + assert.Assert(t, len(command) > 3) + + // Write out that inline script. + dir := t.TempDir() + file := filepath.Join(dir, "script.bash") + assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) + + // Expect shellcheck to be happy. 
+ cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) + + assert.Assert(t, cmp.Contains(command[3], "manageAutogrowAnnotation \"pgdata\" \"10\" \"1024\"")) + assert.Assert(t, cmp.Contains(command[3], "manageAutogrowAnnotation \"pgwal\" \"20\" \"2048\"")) + +} diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index a72672824..73ac1125d 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -173,9 +173,9 @@ containers: # Parameters for curl when managing autogrow annotation. APISERVER="https://kubernetes.default.svc" SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount" - NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) - TOKEN=$(cat ${SERVICEACCOUNT}/token) - CACERT=${SERVICEACCOUNT}/ca.crt + NAMESPACE=$(cat "${SERVICEACCOUNT}/namespace") + TOKEN=$(cat "${SERVICEACCOUNT}/token") + CACERT="${SERVICEACCOUNT}/ca.crt" # Manage autogrow annotation. # Return size in Mebibytes. @@ -184,27 +184,29 @@ containers: local trigger=$2 local maxGrow=$3 - size=$(df --block-size=M /"${volume}" | awk 'FNR == 2 {print $2}') - use=$(df /"${volume}" | awk 'FNR == 2 {print $5}') + size=$(df --block-size=M /"${volume}") + read -r _ size _ <<< "${size#*$'\n'}" + use=$(df /"${volume}") + read -r _ _ _ _ use _ <<< "${use#*$'\n'}" sizeInt="${size//M/}" # Use the sed punctuation class, because the shell will not accept the percent sign in an expansion. - useInt=$(echo $use | sed 's/[[:punct:]]//g') + useInt=${use//[[:punct:]]/} triggerExpansion="$((useInt > trigger))" - if [[ $triggerExpansion -eq 1 ]]; then + if [[ ${triggerExpansion} -eq 1 ]]; then newSize="$(((sizeInt / 2)+sizeInt))" # Only compare with maxGrow if it is set (not empty) - if [[ -n "$maxGrow" ]]; then + if [[ -n "${maxGrow}" ]]; then # check to see how much we would normally grow sizeDiff=$((newSize - sizeInt)) # Compare the size difference to the maxGrow; if it is greater, cap it to maxGrow - if [[ $sizeDiff -gt $maxGrow ]]; then + if [[ ${sizeDiff} -gt ${maxGrow} ]]; then newSize=$((sizeInt + maxGrow)) fi fi newSizeMi="${newSize}Mi" - d='[{"op": "add", "path": "/metadata/annotations/suggested-'"${volume}"'-pvc-size", "value": "'"$newSizeMi"'"}]' - curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + d='[{"op": "add", "path": "/metadata/annotations/suggested-'"${volume}"'-pvc-size", "value": "'"${newSizeMi}"'"}]' + curl --cacert "${CACERT}" --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "${d}" fi } From f2252fb9c376caf1f15e36fcb8d05bf22906e419 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Fri, 7 Mar 2025 12:05:46 -0600 Subject: [PATCH 12/43] Use int32 for PostgresVersion in Go structs This aligns the PostgresCluster and PGUpgrade structs. Tidy up some Job YAML, too. 
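As a rough sketch of the two patterns this change leans on (the helper
names below are hypothetical, not from this module): fmt.Sprint formats
any integer width where strconv.Itoa requires an int, and a type-set
constraint lets one function accept both widths during the transition,
as versions.go does with ReleaseIsFinal.

    package example

    import "fmt"

    // fmt.Sprint formats any integer width, so call sites need no change
    // when a field moves from int to int32 (strconv.Itoa requires an int).
    func versionString(v int32) string {
        return fmt.Sprint(v)
    }

    // A union type-set constraint serves both widths during the migration,
    // mirroring the ReleaseIsFinal change in versions.go; the bounds here
    // match the kubebuilder validation markers on PostgresVersion.
    func supportedVersion[N ~int | ~int32](major N) bool {
        return int(major) >= 11 && int(major) <= 17
    }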
--- ...ator.crunchydata.com_postgresclusters.yaml | 4 ++ internal/collector/postgres.go | 2 +- .../pgupgrade/pgupgrade_controller.go | 2 +- .../controller/postgrescluster/volumes.go | 64 +++++++++---------- .../postgrescluster/volumes_test.go | 54 ++++++++++------ internal/pgbackrest/config.go | 7 +- internal/postgres/config_test.go | 2 +- internal/postgres/versions.go | 4 +- .../v1/postgrescluster_types.go | 4 +- .../v1beta1/postgrescluster_types.go | 4 +- 10 files changed, 80 insertions(+), 67 deletions(-) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 8556b11d2..ce139e4c7 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -13035,6 +13035,7 @@ spec: postgresVersion: description: The major version of PostgreSQL installed in the PostgreSQL image + format: int32 maximum: 17 minimum: 11 type: integer @@ -18812,6 +18813,7 @@ spec: description: |- Stores the current PostgreSQL major version following a successful major PostgreSQL upgrade. + format: int32 type: integer proxy: description: Current state of the PostgreSQL proxy. @@ -31867,6 +31869,7 @@ spec: postgresVersion: description: The major version of PostgreSQL installed in the PostgreSQL image + format: int32 maximum: 17 minimum: 11 type: integer @@ -37612,6 +37615,7 @@ spec: description: |- Stores the current PostgreSQL major version following a successful major PostgreSQL upgrade. + format: int32 type: integer proxy: description: Current state of the PostgreSQL proxy. diff --git a/internal/collector/postgres.go b/internal/collector/postgres.go index ca627a8fd..a279be33c 100644 --- a/internal/collector/postgres.go +++ b/internal/collector/postgres.go @@ -92,7 +92,7 @@ func NewConfigForPostgresPod(ctx context.Context, var postgresLogsTransforms json.RawMessage // postgresCSVNames returns the names of fields in the CSV logs for version. -func postgresCSVNames(version int) string { +func postgresCSVNames(version int32) string { // JSON is the preferred format, so use those names. // https://www.postgresql.org/docs/current/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-JSONLOG diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index 653ea9e55..f5a3fc598 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -440,7 +440,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // Set the cluster status when we know the upgrade has completed successfully. 
// This will serve to help the user see that the upgrade has completed if they // are only watching the PostgresCluster - patch.Status.PostgresVersion = int(upgrade.Spec.ToPostgresVersion) + patch.Status.PostgresVersion = upgrade.Spec.ToPostgresVersion // Set the pgBackRest status for bootstrapping patch.Status.PGBackRest.Repos = []v1beta1.RepoStatus{} diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index 93c8ded14..919633377 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -7,7 +7,6 @@ package postgrescluster import ( "context" "fmt" - "strconv" "github.com/pkg/errors" batchv1 "k8s.io/api/batch/v1" @@ -476,21 +475,20 @@ func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, // `patroni.dynamic.json` holds the previous state of the DCS. Since we are // migrating the volumes, we want to clear out any obsolete configuration info. script := fmt.Sprintf(`echo "Preparing cluster %s volumes for PGO v5.x" - echo "pgdata_pvc=%s" - echo "Current PG data directory volume contents:" - ls -lh "/pgdata" - echo "Now updating PG data directory..." - [ -d "/pgdata/%s" ] && mv "/pgdata/%s" "/pgdata/pg%s_bootstrap" - rm -f "/pgdata/pg%s/patroni.dynamic.json" - echo "Updated PG data directory contents:" - ls -lh "/pgdata" - echo "PG Data directory preparation complete" - `, cluster.Name, +echo "pgdata_pvc=%s" +echo "Current PG data directory volume contents:" +ls -lh "/pgdata" +echo "Now updating PG data directory..." +[ -d "/pgdata/%s" ] && mv "/pgdata/%s" "/pgdata/pg%d_bootstrap" +rm -f "/pgdata/pg%d/patroni.dynamic.json" +echo "Updated PG data directory contents:" +ls -lh "/pgdata" +echo "PG Data directory preparation complete"`, cluster.Name, cluster.Spec.DataSource.Volumes.PGDataVolume.PVCName, cluster.Spec.DataSource.Volumes.PGDataVolume.Directory, cluster.Spec.DataSource.Volumes.PGDataVolume.Directory, - strconv.Itoa(cluster.Spec.PostgresVersion), - strconv.Itoa(cluster.Spec.PostgresVersion)) + cluster.Spec.PostgresVersion, + cluster.Spec.PostgresVersion) container := corev1.Container{ Command: []string{"bash", "-ceu", script}, @@ -596,15 +594,14 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, moveDirJob.Labels = labels script := fmt.Sprintf(`echo "Preparing cluster %s volumes for PGO v5.x" - echo "pg_wal_pvc=%s" - echo "Current PG WAL directory volume contents:" - ls -lh "/pgwal" - echo "Now updating PG WAL directory..." - [ -d "/pgwal/%s" ] && mv "/pgwal/%s" "/pgwal/%s-wal" - echo "Updated PG WAL directory contents:" - ls -lh "/pgwal" - echo "PG WAL directory preparation complete" - `, cluster.Name, +echo "pg_wal_pvc=%s" +echo "Current PG WAL directory volume contents:" +ls -lh "/pgwal" +echo "Now updating PG WAL directory..." 
+[ -d "/pgwal/%s" ] && mv "/pgwal/%s" "/pgwal/%s-wal" +echo "Updated PG WAL directory contents:" +ls -lh "/pgwal" +echo "PG WAL directory preparation complete"`, cluster.Name, cluster.Spec.DataSource.Volumes.PGWALVolume.PVCName, cluster.Spec.DataSource.Volumes.PGWALVolume.Directory, cluster.Spec.DataSource.Volumes.PGWALVolume.Directory, @@ -714,18 +711,17 @@ func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context, moveDirJob.Labels = labels script := fmt.Sprintf(`echo "Preparing cluster %s pgBackRest repo volume for PGO v5.x" - echo "repo_pvc=%s" - echo "pgbackrest directory:" - ls -lh /pgbackrest - echo "Current pgBackRest repo directory volume contents:" - ls -lh "/pgbackrest/%s" - echo "Now updating repo directory..." - [ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/archive" - [ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/backup" - echo "Updated /pgbackrest directory contents:" - ls -lh "/pgbackrest" - echo "Repo directory preparation complete" - `, cluster.Name, +echo "repo_pvc=%s" +echo "pgbackrest directory:" +ls -lh /pgbackrest +echo "Current pgBackRest repo directory volume contents:" +ls -lh "/pgbackrest/%s" +echo "Now updating repo directory..." +[ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/archive" +[ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/backup" +echo "Updated /pgbackrest directory contents:" +ls -lh "/pgbackrest" +echo "Repo directory preparation complete"`, cluster.Name, cluster.Spec.DataSource.Volumes.PGBackRestVolume.PVCName, cluster.Spec.DataSource.Volumes.PGBackRestVolume.Directory, cluster.Spec.DataSource.Volumes.PGBackRestVolume.Directory, diff --git a/internal/controller/postgrescluster/volumes_test.go b/internal/controller/postgrescluster/volumes_test.go index 2f6db3198..440416461 100644 --- a/internal/controller/postgrescluster/volumes_test.go +++ b/internal/controller/postgrescluster/volumes_test.go @@ -754,12 +754,17 @@ containers: - command: - bash - -ceu - - "echo \"Preparing cluster testcluster volumes for PGO v5.x\"\n echo \"pgdata_pvc=testpgdata\"\n - \ echo \"Current PG data directory volume contents:\" \n ls -lh \"/pgdata\"\n - \ echo \"Now updating PG data directory...\"\n [ -d \"/pgdata/testpgdatadir\" - ] && mv \"/pgdata/testpgdatadir\" \"/pgdata/pg13_bootstrap\"\n rm -f \"/pgdata/pg13/patroni.dynamic.json\"\n - \ echo \"Updated PG data directory contents:\" \n ls -lh \"/pgdata\"\n echo - \"PG Data directory preparation complete\"\n " + - |- + echo "Preparing cluster testcluster volumes for PGO v5.x" + echo "pgdata_pvc=testpgdata" + echo "Current PG data directory volume contents:" + ls -lh "/pgdata" + echo "Now updating PG data directory..." 
+ [ -d "/pgdata/testpgdatadir" ] && mv "/pgdata/testpgdatadir" "/pgdata/pg13_bootstrap" + rm -f "/pgdata/pg13/patroni.dynamic.json" + echo "Updated PG data directory contents:" + ls -lh "/pgdata" + echo "PG Data directory preparation complete" image: example.com/crunchy-postgres-ha:test imagePullPolicy: Always name: pgdata-move-job @@ -814,12 +819,16 @@ containers: - command: - bash - -ceu - - "echo \"Preparing cluster testcluster volumes for PGO v5.x\"\n echo \"pg_wal_pvc=testwal\"\n - \ echo \"Current PG WAL directory volume contents:\"\n ls -lh \"/pgwal\"\n - \ echo \"Now updating PG WAL directory...\"\n [ -d \"/pgwal/testwaldir\" - ] && mv \"/pgwal/testwaldir\" \"/pgwal/testcluster-wal\"\n echo \"Updated PG - WAL directory contents:\"\n ls -lh \"/pgwal\"\n echo \"PG WAL directory - preparation complete\"\n " + - |- + echo "Preparing cluster testcluster volumes for PGO v5.x" + echo "pg_wal_pvc=testwal" + echo "Current PG WAL directory volume contents:" + ls -lh "/pgwal" + echo "Now updating PG WAL directory..." + [ -d "/pgwal/testwaldir" ] && mv "/pgwal/testwaldir" "/pgwal/testcluster-wal" + echo "Updated PG WAL directory contents:" + ls -lh "/pgwal" + echo "PG WAL directory preparation complete" image: example.com/crunchy-postgres-ha:test imagePullPolicy: Always name: pgwal-move-job @@ -874,14 +883,19 @@ containers: - command: - bash - -ceu - - "echo \"Preparing cluster testcluster pgBackRest repo volume for PGO v5.x\"\n - \ echo \"repo_pvc=testrepo\"\n echo \"pgbackrest directory:\"\n ls -lh - /pgbackrest\n echo \"Current pgBackRest repo directory volume contents:\" \n - \ ls -lh \"/pgbackrest/testrepodir\"\n echo \"Now updating repo directory...\"\n - \ [ -d \"/pgbackrest/testrepodir\" ] && mv -t \"/pgbackrest/\" \"/pgbackrest/testrepodir/archive\"\n - \ [ -d \"/pgbackrest/testrepodir\" ] && mv -t \"/pgbackrest/\" \"/pgbackrest/testrepodir/backup\"\n - \ echo \"Updated /pgbackrest directory contents:\"\n ls -lh \"/pgbackrest\"\n - \ echo \"Repo directory preparation complete\"\n " + - |- + echo "Preparing cluster testcluster pgBackRest repo volume for PGO v5.x" + echo "repo_pvc=testrepo" + echo "pgbackrest directory:" + ls -lh /pgbackrest + echo "Current pgBackRest repo directory volume contents:" + ls -lh "/pgbackrest/testrepodir" + echo "Now updating repo directory..." 
+ [ -d "/pgbackrest/testrepodir" ] && mv -t "/pgbackrest/" "/pgbackrest/testrepodir/archive" + [ -d "/pgbackrest/testrepodir" ] && mv -t "/pgbackrest/" "/pgbackrest/testrepodir/backup" + echo "Updated /pgbackrest directory contents:" + ls -lh "/pgbackrest" + echo "Repo directory preparation complete" image: example.com/crunchy-pgbackrest:test imagePullPolicy: Always name: repo-move-job diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index f1d1fc30f..808354007 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "path" - "strconv" "strings" "time" @@ -109,7 +108,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet populatePGInstanceConfigurationMap( serviceName, serviceNamespace, repoHostName, pgdataDir, config.FetchKeyCommand(&postgresCluster.Spec), - strconv.Itoa(postgresCluster.Spec.PostgresVersion), + fmt.Sprint(postgresCluster.Spec.PostgresVersion), pgPort, postgresCluster.Spec.Backups.PGBackRest.Repos, postgresCluster.Spec.Backups.PGBackRest.Global, util.GetPGBackRestLogPathForInstance(postgresCluster), @@ -130,7 +129,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet populateRepoHostConfigurationMap( serviceName, serviceNamespace, pgdataDir, config.FetchKeyCommand(&postgresCluster.Spec), - strconv.Itoa(postgresCluster.Spec.PostgresVersion), + fmt.Sprint(postgresCluster.Spec.PostgresVersion), pgPort, instanceNames, postgresCluster.Spec.Backups.PGBackRest.Repos, postgresCluster.Spec.Backups.PGBackRest.Global, @@ -161,7 +160,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet populateCloudRepoConfigurationMap( serviceName, serviceNamespace, pgdataDir, config.FetchKeyCommand(&postgresCluster.Spec), - strconv.Itoa(postgresCluster.Spec.PostgresVersion), + fmt.Sprint(postgresCluster.Spec.PostgresVersion), cloudLogPath, pgPort, instanceNames, postgresCluster.Spec.Backups.PGBackRest.Repos, postgresCluster.Spec.Backups.PGBackRest.Global, diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index 124596933..c0960ac27 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -44,7 +44,7 @@ func TestDataDirectory(t *testing.T) { func TestDataStorage(t *testing.T) { cluster := new(v1beta1.PostgresCluster) - cluster.Spec.PostgresVersion = rand.IntN(20) + cluster.Spec.PostgresVersion = rand.Int32N(20) assert.Equal(t, DataStorage(cluster), "/pgdata") } diff --git a/internal/postgres/versions.go b/internal/postgres/versions.go index 17d067966..bf700d972 100644 --- a/internal/postgres/versions.go +++ b/internal/postgres/versions.go @@ -20,7 +20,7 @@ var finalReleaseDates = map[int]time.Time{ // ReleaseIsFinal returns whether or not t is definitively past the final // scheduled release of a Postgres version. 
-func ReleaseIsFinal(majorVersion int, t time.Time) bool {
-	known, ok := finalReleaseDates[majorVersion]
+func ReleaseIsFinal[N ~int | ~int32](majorVersion N, t time.Time) bool {
+	known, ok := finalReleaseDates[int(majorVersion)]
 	return ok && t.After(known)
 }
diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go
index 36177d1e4..8ae03bf25 100644
--- a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go
+++ b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go
@@ -144,7 +144,7 @@ type PostgresClusterSpec struct {
 	// +kubebuilder:validation:Minimum=11
 	// +kubebuilder:validation:Maximum=17
 	// +operator-sdk:csv:customresourcedefinitions:type=spec,order=1
-	PostgresVersion int `json:"postgresVersion"`
+	PostgresVersion int32 `json:"postgresVersion"`
 
 	// The PostGIS extension version installed in the PostgreSQL image.
 	// When image is not set, indicates a PostGIS enabled image will be used.
@@ -391,7 +391,7 @@ type PostgresClusterStatus struct {
 	// Stores the current PostgreSQL major version following a successful
 	// major PostgreSQL upgrade.
 	// +optional
-	PostgresVersion int `json:"postgresVersion"`
+	PostgresVersion int32 `json:"postgresVersion"`
 
 	// Current state of the PostgreSQL proxy.
 	// +optional
diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go
index 60a65d323..88c16b9af 100644
--- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go
+++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go
@@ -129,7 +129,7 @@ type PostgresClusterSpec struct {
 	// +kubebuilder:validation:Minimum=11
 	// +kubebuilder:validation:Maximum=17
 	// +operator-sdk:csv:customresourcedefinitions:type=spec,order=1
-	PostgresVersion int `json:"postgresVersion"`
+	PostgresVersion int32 `json:"postgresVersion"`
 
 	// The PostGIS extension version installed in the PostgreSQL image.
 	// When image is not set, indicates a PostGIS enabled image will be used.
@@ -375,7 +375,7 @@ type PostgresClusterStatus struct {
 	// Stores the current PostgreSQL major version following a successful
 	// major PostgreSQL upgrade.
 	// +optional
-	PostgresVersion int `json:"postgresVersion"`
+	PostgresVersion int32 `json:"postgresVersion"`
 
 	// Current state of the PostgreSQL proxy.
 	// +optional

From f73d542f8a4b1d80c8248273191cbd138a597ab2 Mon Sep 17 00:00:00 2001
From: Chris Bandy
Date: Thu, 14 Aug 2025 22:49:36 -0500
Subject: [PATCH 13/43] Change Reconciler implementations to ObjectReconciler

Each implementation was doing the first fetch of its main object a bit
differently. The controller-runtime module has been able to do this itself
since v0.17.0, using Go generics.
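
[Editor's note] A minimal sketch of the controller-runtime API this patch
adopts, for readers unfamiliar with it. The names examplev1.Widget and
WidgetReconciler are hypothetical stand-ins, not code from this repository:

    package example

    import (
        "context"

        "sigs.k8s.io/controller-runtime/pkg/builder"
        "sigs.k8s.io/controller-runtime/pkg/manager"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"

        examplev1 "example.com/api/v1" // hypothetical API group
    )

    // WidgetReconciler satisfies reconcile.ObjectReconciler[*examplev1.Widget]:
    // it receives the fetched object itself rather than a reconcile.Request.
    type WidgetReconciler struct{}

    func (r *WidgetReconciler) Reconcile(ctx context.Context, w *examplev1.Widget) (reconcile.Result, error) {
        // Work with w directly; no Get call and no NotFound handling needed.
        return reconcile.Result{}, nil
    }

    func addToManager(m manager.Manager) error {
        return builder.ControllerManagedBy(m).
            For(&examplev1.Widget{}).
            // AsReconciler adapts the typed reconciler to reconcile.Reconciler,
            // fetching the Widget named by each request and ignoring NotFound.
            Complete(reconcile.AsReconciler(m.GetClient(), &WidgetReconciler{}))
    }

The controllers changed below register the same way, passing their
field-owner-wrapped client as the first argument to AsReconciler.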
--- .../crunchybridgecluster_controller.go | 56 +++++++------------ .../pgupgrade/pgupgrade_controller.go | 56 +++++++------------ .../postgrescluster/cluster_test.go | 9 +-- .../controller/postgrescluster/controller.go | 19 +------ .../postgrescluster/controller_test.go | 4 +- .../postgrescluster/instance_test.go | 5 +- .../standalone_pgadmin/controller.go | 32 ++++------- 7 files changed, 60 insertions(+), 121 deletions(-) diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go index 98f3897c0..8a3280f51 100644 --- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go @@ -21,6 +21,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/bridge" "github.com/crunchydata/postgres-operator/internal/controller/runtime" @@ -50,8 +51,8 @@ type CrunchyBridgeClusterReconciler struct { } } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list,watch} -//+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,list,watch} +//+kubebuilder:rbac:groups="",resources="secrets",verbs={get,list,watch} // ManagedReconciler creates a [CrunchyBridgeClusterReconciler] and adds it to m. func ManagedReconciler(m ctrl.Manager, newClient func() bridge.ClientInterface) error { @@ -72,7 +73,7 @@ func ManagedReconciler(m ctrl.Manager, newClient func() bridge.ClientInterface) // Smarter: retry after a certain time for each cluster WatchesRawSource( runtime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request { var list v1beta1.CrunchyBridgeClusterList _ = reconciler.Reader.List(ctx, &list) return runtime.Requests(initialize.Pointers(list.Items...)...) @@ -82,11 +83,11 @@ func ManagedReconciler(m ctrl.Manager, newClient func() bridge.ClientInterface) // Watch secrets and filter for secrets mentioned by CrunchyBridgeClusters Watches( &corev1.Secret{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []reconcile.Request { return runtime.Requests(reconciler.findCrunchyBridgeClustersForSecret(ctx, client.ObjectKeyFromObject(secret))...) }), ). 
- Complete(reconciler) + Complete(reconcile.AsReconciler(kubernetes, reconciler)) } // The owner reference created by controllerutil.SetControllerReference blocks @@ -105,47 +106,32 @@ func (r *CrunchyBridgeClusterReconciler) setControllerReference( return controllerutil.SetControllerReference(owner, controlled, runtime.Scheme) } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,patch,update} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={patch,update} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/status",verbs={patch,update} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/finalizers",verbs={patch,update} //+kubebuilder:rbac:groups="",resources="secrets",verbs={get} // Reconcile does the work to move the current state of the world toward the -// desired state described in a [v1beta1.CrunchyBridgeCluster] identified by req. -func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +// desired state described in crunchybridgecluster. +func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, crunchybridgecluster *v1beta1.CrunchyBridgeCluster) (ctrl.Result, error) { + var err error ctx, span := tracing.Start(ctx, "reconcile-crunchybridgecluster") log := logging.FromContext(ctx) defer span.End() - // Retrieve the crunchybridgecluster from the client cache, if it exists. A deferred - // function below will send any changes to its Status field. - // - // NOTE: No DeepCopy is necessary here because controller-runtime makes a - // copy before returning from its cache. - // - https://github.com/kubernetes-sigs/controller-runtime/issues/1235 - crunchybridgecluster := &v1beta1.CrunchyBridgeCluster{} - err := r.Reader.Get(ctx, req.NamespacedName, crunchybridgecluster) + // Write any changes to the crunchybridgecluster status on the way out. + before := crunchybridgecluster.DeepCopy() + defer func() { + if !equality.Semantic.DeepEqual(before.Status, crunchybridgecluster.Status) { + status := r.StatusWriter.Patch(ctx, crunchybridgecluster, client.MergeFrom(before)) - if err == nil { - // Write any changes to the crunchybridgecluster status on the way out. - before := crunchybridgecluster.DeepCopy() - defer func() { - if !equality.Semantic.DeepEqual(before.Status, crunchybridgecluster.Status) { - status := r.StatusWriter.Patch(ctx, crunchybridgecluster, client.MergeFrom(before)) - - if err == nil && status != nil { - err = status - } else if status != nil { - log.Error(status, "Patching CrunchyBridgeCluster status") - } + if err == nil && status != nil { + err = status + } else if status != nil { + log.Error(status, "Patching CrunchyBridgeCluster status") } - }() - } else { - // NotFound cannot be fixed by requeuing so ignore it. During background - // deletion, we receive delete events from crunchybridgecluster's dependents after - // crunchybridgecluster is deleted. 
- return ctrl.Result{}, tracing.Escape(span, client.IgnoreNotFound(err)) - } + } + }() // Get and validate connection secret for requests key, team, err := r.reconcileBridgeConnectionSecret(ctx, crunchybridgecluster) diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index f5a3fc598..22902bcac 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -17,6 +17,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/controller/runtime" @@ -49,9 +50,9 @@ type PGUpgradeReconciler struct { } } -//+kubebuilder:rbac:groups="batch",resources="jobs",verbs={list,watch} -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={list,watch} -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} +//+kubebuilder:rbac:groups="batch",resources="jobs",verbs={get,list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={get,list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch} // ManagedReconciler creates a [PGUpgradeReconciler] and adds it to m. func ManagedReconciler(m ctrl.Manager, r registration.Registration) error { @@ -71,11 +72,11 @@ func ManagedReconciler(m ctrl.Manager, r registration.Registration) error { Owns(&batchv1.Job{}). Watches( v1beta1.NewPostgresCluster(), - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []reconcile.Request { return runtime.Requests(reconciler.findUpgradesForPostgresCluster(ctx, client.ObjectKeyFromObject(cluster))...) }), ). - Complete(reconciler) + Complete(reconcile.AsReconciler(kubernetes, reconciler)) } //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={list} @@ -103,7 +104,6 @@ func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster( return matching } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={get} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades/status",verbs={patch} //+kubebuilder:rbac:groups="batch",resources="jobs",verbs={delete} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get} @@ -114,42 +114,26 @@ func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster( //+kubebuilder:rbac:groups="",resources="endpoints",verbs={delete} // Reconcile does the work to move the current state of the world toward the -// desired state described in a [v1beta1.PGUpgrade] identified by req. -func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, err error) { +// desired state described in upgrade. 
+func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, upgrade *v1beta1.PGUpgrade) (result ctrl.Result, err error) { ctx, span := tracing.Start(ctx, "reconcile-pgupgrade") log := logging.FromContext(ctx) defer span.End() defer func(s tracing.Span) { _ = tracing.Escape(s, err) }(span) - // Retrieve the upgrade from the client cache, if it exists. A deferred - // function below will send any changes to its Status field. - // - // NOTE: No DeepCopy is necessary here because controller-runtime makes a - // copy before returning from its cache. - // - https://github.com/kubernetes-sigs/controller-runtime/issues/1235 - upgrade := &v1beta1.PGUpgrade{} - err = r.Reader.Get(ctx, req.NamespacedName, upgrade) - - if err == nil { - // Write any changes to the upgrade status on the way out. - before := upgrade.DeepCopy() - defer func() { - if !equality.Semantic.DeepEqual(before.Status, upgrade.Status) { - status := r.StatusWriter.Patch(ctx, upgrade, client.MergeFrom(before)) - - if err == nil && status != nil { - err = status - } else if status != nil { - log.Error(status, "Patching PGUpgrade status") - } + // Write any changes to the upgrade status on the way out. + before := upgrade.DeepCopy() + defer func() { + if !equality.Semantic.DeepEqual(before.Status, upgrade.Status) { + status := r.StatusWriter.Patch(ctx, upgrade, client.MergeFrom(before)) + + if err == nil && status != nil { + err = status + } else if status != nil { + log.Error(status, "Patching PGUpgrade status") } - }() - } else { - // NotFound cannot be fixed by requeuing so ignore it. During background - // deletion, we receive delete events from upgrade's dependents after - // upgrade is deleted. - return ctrl.Result{}, client.IgnoreNotFound(err) - } + } + }() // Validate the remainder of the upgrade specification. These can likely // move to CEL rules or a webhook when supported. 
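
[Editor's note] The hand-written Get and IgnoreNotFound deleted above are now
performed by the adapter that reconcile.AsReconciler returns. Roughly, as a
paraphrase of controller-runtime v0.17 rather than a quote of this patch:

    package example

    import (
        "context"
        "reflect"

        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // Sketch, not the library's exact source: the adapter AsReconciler returns.
    type adapter[T client.Object] struct {
        client client.Client
        inner  reconcile.ObjectReconciler[T]
    }

    func (a *adapter[T]) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
        // Allocate a fresh T and fetch it from the client cache.
        obj := reflect.New(reflect.TypeOf(*new(T)).Elem()).Interface().(T)
        if err := a.client.Get(ctx, req.NamespacedName, obj); err != nil {
            // NotFound cannot be fixed by requeuing, so it is dropped here;
            // this is the handling each reconciler used to repeat by hand.
            return reconcile.Result{}, client.IgnoreNotFound(err)
        }
        return a.inner.Reconcile(ctx, obj)
    }

Because the adapter's Get runs on every request, the rbac markers in these
diffs move the get verb from the Reconcile methods up to the {list,watch}
markers on the controller registrations.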
diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index c56947a83..5ea4ace88 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -18,7 +18,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/feature" @@ -100,9 +99,7 @@ func TestCustomLabels(t *testing.T) { }) // Reconcile the cluster - result, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(cluster), - }) + result, err := reconciler.Reconcile(ctx, cluster) assert.NilError(t, err) assert.Assert(t, result.Requeue == false) } @@ -339,9 +336,7 @@ func TestCustomAnnotations(t *testing.T) { }) // Reconcile the cluster - result, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(cluster), - }) + result, err := reconciler.Reconcile(ctx, cluster) assert.NilError(t, err) assert.Assert(t, result.Requeue == false) } diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index 09ddf1583..7d015c401 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -69,29 +69,15 @@ type Reconciler struct { } // +kubebuilder:rbac:groups="",resources="events",verbs={create,patch} -// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch} // +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters/status",verbs={patch} -// Reconcile reconciles a ConfigMap in a namespace managed by the PostgreSQL Operator func (r *Reconciler) Reconcile( - ctx context.Context, request reconcile.Request) (reconcile.Result, error, + ctx context.Context, cluster *v1beta1.PostgresCluster) (reconcile.Result, error, ) { ctx, span := tracing.Start(ctx, "reconcile-postgrescluster") log := logging.FromContext(ctx) defer span.End() - // get the postgrescluster from the cache - cluster := &v1beta1.PostgresCluster{} - if err := r.Reader.Get(ctx, request.NamespacedName, cluster); err != nil { - // NotFound cannot be fixed by requeuing so ignore it. During background - // deletion, we receive delete events from cluster's dependents after - // cluster is deleted. - if err = client.IgnoreNotFound(err); err != nil { - log.Error(err, "unable to fetch PostgresCluster") - } - return runtime.ErrorWithBackoff(tracing.Escape(span, err)) - } - // Set any defaults that may not have been stored in the API. No DeepCopy // is necessary because controller-runtime makes a copy before returning // from its cache. @@ -455,6 +441,7 @@ func (r *Reconciler) setOwnerReference( // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={get,list,watch} // +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={get,list,watch} // +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={get,list,watch} +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch} // ManagedReconciler creates a [Reconciler] and adds it to m. 
func ManagedReconciler(m manager.Manager, r registration.Registration) error { @@ -489,5 +476,5 @@ func ManagedReconciler(m manager.Manager, r registration.Registration) error { Watches(&corev1.Pod{}, reconciler.watchPods()). Watches(&appsv1.StatefulSet{}, reconciler.controllerRefHandlerFuncs()). // watch all StatefulSets - Complete(reconciler)) + Complete(reconcile.AsReconciler(kubernetes, reconciler))) } diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index a6f237b81..95358d488 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -166,9 +166,7 @@ var _ = Describe("PostgresCluster Reconciler", func() { reconcile := func(cluster *v1beta1.PostgresCluster) reconcile.Result { ctx := context.Background() - result, err := test.Reconciler.Reconcile(ctx, - reconcile.Request{NamespacedName: client.ObjectKeyFromObject(cluster)}, - ) + result, err := test.Reconciler.Reconcile(ctx, cluster) Expect(err).ToNot(HaveOccurred(), func() string { var t interface{ StackTrace() errors.StackTrace } if errors.As(err, &t) { diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index 8a028913e..f00267974 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/collector" "github.com/crunchydata/postgres-operator/internal/controller/runtime" @@ -1233,9 +1232,7 @@ func TestDeleteInstance(t *testing.T) { // Reconcile the entire cluster so that we don't have to create all the // resources needed to reconcile a single instance (cm,secrets,svc, etc.) 
- result, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(cluster), - }) + result, err := reconciler.Reconcile(ctx, cluster) assert.NilError(t, err) assert.Assert(t, result.Requeue == false) diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go index fe205dcaf..7e3d0c835 100644 --- a/internal/controller/standalone_pgadmin/controller.go +++ b/internal/controller/standalone_pgadmin/controller.go @@ -14,10 +14,12 @@ import ( "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" @@ -48,7 +50,7 @@ type PGAdminReconciler struct { Recorder record.EventRecorder } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={get,list,watch} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} //+kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={list,watch} //+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch} @@ -56,7 +58,7 @@ type PGAdminReconciler struct { //+kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list,watch} // ManagedReconciler creates a [PGAdminReconciler] and adds it to m. -func ManagedReconciler(m ctrl.Manager) error { +func ManagedReconciler(m manager.Manager) error { exec, err := runtime.NewPodExecutor(m.GetConfig()) kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerPGAdmin) recorder := m.GetEventRecorderFor(naming.ControllerPGAdmin) @@ -69,7 +71,7 @@ func ManagedReconciler(m ctrl.Manager) error { Writer: kubernetes, } - return errors.Join(err, ctrl.NewControllerManagedBy(m). + return errors.Join(err, builder.ControllerManagedBy(m). For(&v1beta1.PGAdmin{}). Owns(&corev1.ConfigMap{}). Owns(&corev1.PersistentVolumeClaim{}). @@ -78,39 +80,29 @@ func ManagedReconciler(m ctrl.Manager) error { Owns(&corev1.Service{}). Watches( v1beta1.NewPostgresCluster(), - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []reconcile.Request { return runtime.Requests(reconciler.findPGAdminsForPostgresCluster(ctx, cluster)...) }), ). Watches( &corev1.Secret{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []reconcile.Request { return runtime.Requests(reconciler.findPGAdminsForSecret(ctx, client.ObjectKeyFromObject(secret))...) }), ). 
- Complete(reconciler)) + Complete(reconcile.AsReconciler(kubernetes, reconciler))) } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={get} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins/status",verbs={patch} -// Reconcile which aims to move the current state of the pgAdmin closer to the -// desired state described in a [v1beta1.PGAdmin] identified by request. -func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +// Reconcile moves the current state of pgAdmin closer to the state described in its specification. +func (r *PGAdminReconciler) Reconcile(ctx context.Context, pgAdmin *v1beta1.PGAdmin) (reconcile.Result, error) { var err error ctx, span := tracing.Start(ctx, "reconcile-pgadmin") log := logging.FromContext(ctx) defer span.End() - pgAdmin := &v1beta1.PGAdmin{} - if err := r.Reader.Get(ctx, req.NamespacedName, pgAdmin); err != nil { - // NotFound cannot be fixed by requeuing so ignore it. During background - // deletion, we receive delete events from pgadmin's dependents after - // pgadmin is deleted. - return ctrl.Result{}, tracing.Escape(span, client.IgnoreNotFound(err)) - } - // Write any changes to the pgadmin status on the way out. before := pgAdmin.DeepCopy() defer func() { @@ -163,7 +155,7 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct log.V(1).Info("Reconciled pgAdmin") } - return ctrl.Result{}, tracing.Escape(span, err) + return reconcile.Result{}, tracing.Escape(span, err) } // The owner reference created by controllerutil.SetControllerReference blocks From bb6d201c24460114e0c8a5d9fe45fbd3388f9da6 Mon Sep 17 00:00:00 2001 From: ValClarkson Date: Mon, 15 Sep 2025 17:37:05 -0400 Subject: [PATCH 14/43] updated to latest released images --- .github/workflows/test.yaml | 46 ++++++++++++++++---------------- Makefile | 2 +- config/manager/manager.yaml | 20 +++++++------- testing/chainsaw/e2e/values.yaml | 2 +- 4 files changed, 35 insertions(+), 35 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 2ae50c8a6..f7110c92a 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -64,9 +64,9 @@ jobs: with: k3s-channel: "${{ matrix.kubernetes }}" prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.54.2-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534 - run: make createnamespaces check-envtest-existing env: @@ -98,15 +98,15 @@ jobs: with: k3s-channel: "${{ matrix.kubernetes }}" prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.54.2-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.3-2520 - 
registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.4-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.5-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.5-3.4-2520 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.3-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.6-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.4-2534 - name: Get pgMonitor files. run: make get-pgmonitor @@ -126,16 +126,16 @@ jobs: --env 'CHECK_FOR_UPGRADES=false' \ --env 'QUERIES_CONFIG_DIR=/mnt/hack/tools/queries' \ --env 'KUBECONFIG=hack/.kube/postgres-operator/pgo' \ - --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.54.2-2520' \ - --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2520' \ - --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2520' \ - --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.5-2520' \ - --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520' \ - --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.3-2520' \ - --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.4-2520' \ - --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520' \ - --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.5-3.4-2520' \ - --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2520' \ + --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534' \ + --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2534' \ + --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2534' \ + --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.6-2534' \ + --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.3-2534' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534' \ + --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534' \ + --env 
'RELATED_IMAGE_POSTGRES_17_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.4-2534' \ + --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2534' \ --env 'RELATED_IMAGE_COLLECTOR=registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.2-0' \ --env 'PGO_FEATURE_GATES=TablespaceVolumes=true,OpenTelemetryLogs=true,OpenTelemetryMetrics=true' \ --name 'postgres-operator' localhost/postgres-operator @@ -152,7 +152,7 @@ jobs: KUTTL_PG_UPGRADE_TO_VERSION: '17' KUTTL_PG_VERSION: '16' KUTTL_POSTGIS_VERSION: '3.4' - KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520' + KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534' - run: | make check-kuttl && exit failed=$? diff --git a/Makefile b/Makefile index 7305d666c..5f199818b 100644 --- a/Makefile +++ b/Makefile @@ -198,7 +198,7 @@ generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 16 generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 17 generate-kuttl: export KUTTL_PG_VERSION ?= 16 generate-kuttl: export KUTTL_POSTGIS_VERSION ?= 3.4 -generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520 +generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534 generate-kuttl: export KUTTL_TEST_DELETE_NAMESPACE ?= kuttl-test-delete-namespace generate-kuttl: ## Generate kuttl tests [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index fc86b653e..24de85f5f 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -23,25 +23,25 @@ spec: - name: CRUNCHY_DEBUG value: "true" - name: RELATED_IMAGE_POSTGRES_16 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534" - name: RELATED_IMAGE_POSTGRES_16_GIS_3.3 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.3-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.3-2534" - name: RELATED_IMAGE_POSTGRES_16_GIS_3.4 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.4-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534" - name: RELATED_IMAGE_POSTGRES_17 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534" - name: RELATED_IMAGE_POSTGRES_17_GIS_3.4 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.5-3.5-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.5-2534" - name: RELATED_IMAGE_PGBACKREST - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.54.2-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534" - name: RELATED_IMAGE_PGBOUNCER - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2534" - name: RELATED_IMAGE_PGEXPORTER - value: 
"registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2534" - name: RELATED_IMAGE_PGUPGRADE - value: "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.5-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.6-2534" - name: RELATED_IMAGE_STANDALONE_PGADMIN - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2534" - name: RELATED_IMAGE_COLLECTOR value: "registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.2-0" securityContext: diff --git a/testing/chainsaw/e2e/values.yaml b/testing/chainsaw/e2e/values.yaml index 0c8a3ce58..152354e5d 100644 --- a/testing/chainsaw/e2e/values.yaml +++ b/testing/chainsaw/e2e/values.yaml @@ -2,4 +2,4 @@ versions: postgres: '17' images: - psql: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520' + psql: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534' From 56ccaf9a1b56d8512cf21a52fa2279a27d8eb3ff Mon Sep 17 00:00:00 2001 From: ValClarkson Date: Mon, 15 Sep 2025 17:39:41 -0400 Subject: [PATCH 15/43] updated release version --- .github/workflows/test.yaml | 2 +- config/manager/manager.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index f7110c92a..43fa3973d 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -136,7 +136,7 @@ jobs: --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534' \ --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.4-2534' \ --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2534' \ - --env 'RELATED_IMAGE_COLLECTOR=registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.2-0' \ + --env 'RELATED_IMAGE_COLLECTOR=registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.3-0' \ --env 'PGO_FEATURE_GATES=TablespaceVolumes=true,OpenTelemetryLogs=true,OpenTelemetryMetrics=true' \ --name 'postgres-operator' localhost/postgres-operator diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 24de85f5f..d2ea47e37 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -43,7 +43,7 @@ spec: - name: RELATED_IMAGE_STANDALONE_PGADMIN value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2534" - name: RELATED_IMAGE_COLLECTOR - value: "registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.2-0" + value: "registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.3-0" securityContext: allowPrivilegeEscalation: false capabilities: { drop: [ALL] } From 806baa1ad58d751515b09a29371de4078686fe60 Mon Sep 17 00:00:00 2001 From: Val Date: Tue, 16 Sep 2025 10:44:25 -0400 Subject: [PATCH 16/43] Update config/manager/manager.yaml Co-authored-by: Drew Sessler <36803518+dsessler7@users.noreply.github.com> --- config/manager/manager.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index d2ea47e37..e4d7e8980 100644 --- a/config/manager/manager.yaml +++ 
b/config/manager/manager.yaml
@@ -30,7 +30,7 @@ spec:
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534"
         - name: RELATED_IMAGE_POSTGRES_17
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534"
-        - name: RELATED_IMAGE_POSTGRES_17_GIS_3.4
+        - name: RELATED_IMAGE_POSTGRES_17_GIS_3.5
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.5-2534"
         - name: RELATED_IMAGE_PGBACKREST
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534"

From 44edf36102484b3083f1d0621b939baef06facfc Mon Sep 17 00:00:00 2001
From: ValClarkson
Date: Tue, 16 Sep 2025 18:20:51 -0400
Subject: [PATCH 17/43] added both GIS versions for pg17 and updated examples
 to use pg17

---
 .github/workflows/test.yaml                   | 2 ++
 config/manager/manager.yaml                   | 2 ++
 examples/postgrescluster/postgrescluster.yaml | 2 +-
 3 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 43fa3973d..780582498 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -107,6 +107,7 @@ jobs:
             registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534
             registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.6-2534
             registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.4-2534
+            registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.5-2534
 
       - name: Get pgMonitor files.
         run: make get-pgmonitor
@@ -135,6 +136,7 @@ jobs:
           --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534' \
           --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534' \
           --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.4-2534' \
+          --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.5=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.5-2534' \
           --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2534' \
           --env 'RELATED_IMAGE_COLLECTOR=registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.3-0' \
           --env 'PGO_FEATURE_GATES=TablespaceVolumes=true,OpenTelemetryLogs=true,OpenTelemetryMetrics=true' \
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index e4d7e8980..75c3d4c52 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -30,6 +30,8 @@ spec:
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534"
         - name: RELATED_IMAGE_POSTGRES_17
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534"
+        - name: RELATED_IMAGE_POSTGRES_17_GIS_3.4
+          value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.4-2534"
         - name: RELATED_IMAGE_POSTGRES_17_GIS_3.5
           value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.5-2534"
         - name: RELATED_IMAGE_PGBACKREST
diff --git a/examples/postgrescluster/postgrescluster.yaml b/examples/postgrescluster/postgrescluster.yaml
index 75756af94..91cc31a52 100644
--- a/examples/postgrescluster/postgrescluster.yaml
+++ b/examples/postgrescluster/postgrescluster.yaml
@@ -3,7 +3,7 @@ kind: PostgresCluster
 metadata:
   name: hippo
 spec:
-  postgresVersion: 16
+  postgresVersion: 17
   instances:
     - 
name: instance1 dataVolumeClaimSpec: From 0e8e7e3d75c6b66a98a70bb937c6dadc7858e68d Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 13 Nov 2024 15:42:26 -0600 Subject: [PATCH 18/43] Remove server-side apply tests for old Kubernetes Kubernetes 1.22 and OpenShift 4.8 have been out of commission for some time now. --- internal/controller/postgrescluster/apply.go | 3 +- .../controller/postgrescluster/apply_test.go | 94 +------------------ .../postgrescluster/controller_test.go | 76 +++++---------- .../controller/postgrescluster/suite_test.go | 27 +----- 4 files changed, 29 insertions(+), 171 deletions(-) diff --git a/internal/controller/postgrescluster/apply.go b/internal/controller/postgrescluster/apply.go index 88659cf39..ab7c23871 100644 --- a/internal/controller/postgrescluster/apply.go +++ b/internal/controller/postgrescluster/apply.go @@ -54,8 +54,7 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { func applyServiceSpec( patch *kubeapi.JSON6902, actual, intent corev1.ServiceSpec, path ...string, ) { - // Service.Spec.Selector is not +mapType=atomic until Kubernetes 1.22. - // - https://issue.k8s.io/97970 + // Service.Spec.Selector cannot be unset; perhaps https://issue.k8s.io/117447 if !equality.Semantic.DeepEqual(actual.Selector, intent.Selector) { patch.Replace(append(path, "selector")...)(intent.Selector) } diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/postgrescluster/apply_test.go index e06f29059..413ffbd13 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/postgrescluster/apply_test.go @@ -14,7 +14,6 @@ import ( "github.com/google/go-cmp/cmp" "gotest.tools/v3/assert" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -144,41 +143,6 @@ func TestServerSideApply(t *testing.T) { ) }) - t.Run("StatefulSetStatus", func(t *testing.T) { - constructor := func(name string) *appsv1.StatefulSet { - var sts appsv1.StatefulSet - sts.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSet")) - sts.Namespace, sts.Name = ns.Name, name - sts.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: map[string]string{"select": name}, - } - sts.Spec.Template.Labels = map[string]string{"select": name} - sts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "test", Image: "test"}} - return &sts - } - - cc := client.WithFieldOwner(cc, t.Name()) - reconciler := Reconciler{Writer: cc} - upstream := constructor("status-upstream") - - // The structs defined in "k8s.io/api/apps/v1" marshal empty status fields. - switch { - case serverVersion.LessThan(version.MustParseGeneric("1.22")): - assert.ErrorContains(t, - cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership), - "field not declared in schema", - "expected https://issue.k8s.io/109210") - - default: - assert.NilError(t, - cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership)) - } - - // Our apply method generates the correct apply-patch. 
- again := constructor("status-local") - assert.NilError(t, reconciler.apply(ctx, again)) - }) - t.Run("ServiceSelector", func(t *testing.T) { constructor := func(name string) *corev1.Service { var service corev1.Service @@ -190,61 +154,6 @@ func TestServerSideApply(t *testing.T) { return &service } - t.Run("wrong-keys", func(t *testing.T) { - cc := client.WithFieldOwner(cc, t.Name()) - reconciler := Reconciler{Writer: cc} - - intent := constructor("some-selector") - intent.Spec.Selector = map[string]string{"k1": "v1"} - - // Create the Service. - before := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership)) - - // Something external mucks it up. - assert.NilError(t, - cc.Patch(ctx, before, - client.RawPatch(client.Merge.Type(), []byte(`{"spec":{"selector":{"bad":"v2"}}}`)), - client.FieldOwner("wrong"))) - - // client.Apply cannot correct it in old versions of Kubernetes. - after := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, after, client.Apply, client.ForceOwnership)) - - switch { - case serverVersion.LessThan(version.MustParseGeneric("1.22")): - - assert.Assert(t, len(after.Spec.Selector) != len(intent.Spec.Selector), - "expected https://issue.k8s.io/97970, got %v", after.Spec.Selector) - - default: - assert.DeepEqual(t, after.Spec.Selector, intent.Spec.Selector) - } - - // Our apply method corrects it. - again := intent.DeepCopy() - assert.NilError(t, reconciler.apply(ctx, again)) - assert.DeepEqual(t, again.Spec.Selector, intent.Spec.Selector) - - var count int - var managed *metav1.ManagedFieldsEntry - for i := range again.ManagedFields { - if again.ManagedFields[i].Manager == t.Name() { - count++ - managed = &again.ManagedFields[i] - } - } - - assert.Equal(t, count, 1, "expected manager once in %v", again.ManagedFields) - assert.Equal(t, managed.Operation, metav1.ManagedFieldsOperationApply) - - assert.Assert(t, managed.FieldsV1 != nil) - assert.Assert(t, strings.Contains(string(managed.FieldsV1.Raw), `"f:selector":{`), - "expected f:selector in %s", managed.FieldsV1.Raw) - }) - for _, tt := range []struct { name string selector map[string]string @@ -275,6 +184,9 @@ func TestServerSideApply(t *testing.T) { assert.NilError(t, cc.Patch(ctx, after, client.Apply, client.ForceOwnership)) + // Perhaps one of: + // - https://issue.k8s.io/117447 + // - https://github.com/kubernetes-sigs/structured-merge-diff/issues/259 assert.Assert(t, len(after.Spec.Selector) != len(intent.Spec.Selector), "got %v", after.Spec.Selector) diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index 95358d488..5b6f3e4c7 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -21,7 +21,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -340,59 +339,32 @@ spec: // // The "metadata.finalizers" field is also okay. 
// - https://book.kubebuilder.io/reference/using-finalizers.html - // - // NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track - // managed fields on the status subresource: https://issue.k8s.io/88901 - switch { - case suite.ServerVersion.LessThan(version.MustParseGeneric("1.22")): - - // Kubernetes 1.22 began tracking subresources in managed fields. - // - https://pr.k8s.io/100970 - Expect(existing.ManagedFields).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(test.Owner), - "FieldsV1": PointTo(MatchAllFields(Fields{ - "Raw": WithTransform(func(in []byte) (out map[string]any) { - Expect(yaml.Unmarshal(in, &out)).To(Succeed()) - return out - }, MatchAllKeys(Keys{ - "f:metadata": MatchAllKeys(Keys{ - "f:finalizers": Not(BeZero()), - }), - "f:status": Not(BeZero()), - })), - })), - }), - ), `controller should manage only "finalizers" and "status"`) - - default: - Expect(existing.ManagedFields).To(ContainElements( - MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(test.Owner), - "FieldsV1": PointTo(MatchAllFields(Fields{ - "Raw": WithTransform(func(in []byte) (out map[string]any) { - Expect(yaml.Unmarshal(in, &out)).To(Succeed()) - return out - }, MatchAllKeys(Keys{ - "f:metadata": MatchAllKeys(Keys{ - "f:finalizers": Not(BeZero()), - }), - })), + Expect(existing.ManagedFields).To(ContainElements( + MatchFields(IgnoreExtras, Fields{ + "Manager": Equal(test.Owner), + "FieldsV1": PointTo(MatchAllFields(Fields{ + "Raw": WithTransform(func(in []byte) (out map[string]any) { + Expect(yaml.Unmarshal(in, &out)).To(Succeed()) + return out + }, MatchAllKeys(Keys{ + "f:metadata": MatchAllKeys(Keys{ + "f:finalizers": Not(BeZero()), + }), })), - }), - MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(test.Owner), - "FieldsV1": PointTo(MatchAllFields(Fields{ - "Raw": WithTransform(func(in []byte) (out map[string]any) { - Expect(yaml.Unmarshal(in, &out)).To(Succeed()) - return out - }, MatchAllKeys(Keys{ - "f:status": Not(BeZero()), - })), + })), + }), + MatchFields(IgnoreExtras, Fields{ + "Manager": Equal(test.Owner), + "FieldsV1": PointTo(MatchAllFields(Fields{ + "Raw": WithTransform(func(in []byte) (out map[string]any) { + Expect(yaml.Unmarshal(in, &out)).To(Succeed()) + return out + }, MatchAllKeys(Keys{ + "f:status": Not(BeZero()), })), - }), - ), `controller should manage only "finalizers" and "status"`) - } + })), + }), + ), `controller should manage only "finalizers" and "status"`) }) Specify("Patroni Distributed Configuration", func() { diff --git a/internal/controller/postgrescluster/suite_test.go b/internal/controller/postgrescluster/suite_test.go index ffb9d6f1e..f10983902 100644 --- a/internal/controller/postgrescluster/suite_test.go +++ b/internal/controller/postgrescluster/suite_test.go @@ -6,18 +6,12 @@ package postgrescluster import ( "context" - "os" - "strings" "testing" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/util/version" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/testing/require" @@ -25,11 +19,6 @@ import ( var suite struct { Client client.Client - Config *rest.Config - - ServerVersion *version.Version - - Manager manager.Manager } func TestAPIs(t *testing.T) { @@ -39,24 +28,10 @@ func TestAPIs(t *testing.T) { } var _ = BeforeSuite(func() { - if os.Getenv("KUBEBUILDER_ASSETS") == "" && !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { - Skip("skipping") - } + suite.Client = require.Kubernetes(GinkgoT()) logging.SetLogSink(logging.Logrus(GinkgoWriter, "test", 1, 1)) log.SetLogger(logging.FromContext(context.Background())) - - By("bootstrapping test environment") - suite.Config, suite.Client = require.Kubernetes2(GinkgoT()) - - dc, err := discovery.NewDiscoveryClientForConfig(suite.Config) - Expect(err).ToNot(HaveOccurred()) - - server, err := dc.ServerVersion() - Expect(err).ToNot(HaveOccurred()) - - suite.ServerVersion, err = version.ParseGeneric(server.GitVersion) - Expect(err).ToNot(HaveOccurred()) }) var _ = AfterSuite(func() { From b3d57df79128293f6013f638004d7833223c8652 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 13 Nov 2024 17:12:48 -0600 Subject: [PATCH 19/43] Move server-side apply function to a shared package Multiple controllers had a method for this, but the implementations differed slightly. This combines their fixes and tests them in a single place. --- internal/bridge/crunchybridgecluster/apply.go | 33 ----------- .../bridge/crunchybridgecluster/postgres.go | 3 +- internal/controller/pgupgrade/apply.go | 31 ---------- .../pgupgrade/pgupgrade_controller.go | 4 +- internal/controller/postgrescluster/apply.go | 43 +------------- .../postgrescluster/controller_ref_manager.go | 8 +-- internal/controller/runtime/apply.go | 58 +++++++++++++++++++ .../apply_test.go | 33 +++++------ internal/controller/runtime/client.go | 1 + internal/controller/runtime/conversion.go | 4 +- .../{kubeapi => controller/runtime}/patch.go | 2 +- .../runtime}/patch_test.go | 2 +- .../controller/standalone_pgadmin/apply.go | 33 ----------- .../standalone_pgadmin/configmap.go | 3 +- .../controller/standalone_pgadmin/service.go | 3 +- .../standalone_pgadmin/statefulset.go | 3 +- .../controller/standalone_pgadmin/users.go | 3 +- .../controller/standalone_pgadmin/volume.go | 3 +- 18 files changed, 98 insertions(+), 172 deletions(-) delete mode 100644 internal/bridge/crunchybridgecluster/apply.go delete mode 100644 internal/controller/pgupgrade/apply.go create mode 100644 internal/controller/runtime/apply.go rename internal/controller/{postgrescluster => runtime}/apply_test.go (90%) rename internal/{kubeapi => controller/runtime}/patch.go (99%) rename internal/{kubeapi => controller/runtime}/patch_test.go (99%) delete mode 100644 internal/controller/standalone_pgadmin/apply.go diff --git a/internal/bridge/crunchybridgecluster/apply.go b/internal/bridge/crunchybridgecluster/apply.go deleted file mode 100644 index 850920fa8..000000000 --- a/internal/bridge/crunchybridgecluster/apply.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package crunchybridgecluster - -import ( - "context" - "reflect" - - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// apply sends an apply patch to object's endpoint in the Kubernetes API and -// updates object with any returned content. The fieldManager is set by -// r.Writer and the force parameter is true. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts -// -// NOTE: This function is duplicated from a version in the postgrescluster package -func (r *CrunchyBridgeClusterReconciler) apply(ctx context.Context, object client.Object) error { - // Generate an apply-patch by comparing the object to its zero value. - zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() - data, err := client.MergeFrom(zero.(client.Object)).Data(object) - apply := client.RawPatch(client.Apply.Type(), data) - - // Send the apply-patch with force=true. - if err == nil { - err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership) - } - - return err -} diff --git a/internal/bridge/crunchybridgecluster/postgres.go b/internal/bridge/crunchybridgecluster/postgres.go index 0aa09517d..f8b8bf6b1 100644 --- a/internal/bridge/crunchybridgecluster/postgres.go +++ b/internal/bridge/crunchybridgecluster/postgres.go @@ -15,6 +15,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -152,7 +153,7 @@ func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets( roleSecrets[roleName], err = r.generatePostgresRoleSecret(cluster, role, clusterRole) } if err == nil { - err = errors.WithStack(r.apply(ctx, roleSecrets[roleName])) + err = errors.WithStack(runtime.Apply(ctx, r.Writer, roleSecrets[roleName])) } if err != nil { log.Error(err, "Issue creating role secret.") diff --git a/internal/controller/pgupgrade/apply.go b/internal/controller/pgupgrade/apply.go deleted file mode 100644 index c3e869eba..000000000 --- a/internal/controller/pgupgrade/apply.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package pgupgrade - -import ( - "context" - "reflect" - - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// apply sends an apply patch to object's endpoint in the Kubernetes API and -// updates object with any returned content. The fieldManager is set by -// r.Writer and the force parameter is true. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts -func (r *PGUpgradeReconciler) apply(ctx context.Context, object client.Object) error { - // Generate an apply-patch by comparing the object to its zero value. - zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() - data, err := client.MergeFrom(zero.(client.Object)).Data(object) - apply := client.RawPatch(client.Apply.Type(), data) - - // Send the apply-patch with force=true. 
- if err == nil { - err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership) - } - - return err -} diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index 22902bcac..61eb39a7c 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -437,7 +437,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, upgrade *v1beta1.PG // TODO: error from apply could mean that the job exists with a different spec. if err == nil && !upgradeJobComplete { - err = errors.WithStack(r.apply(ctx, + err = errors.WithStack(runtime.Apply(ctx, r.Writer, r.generateUpgradeJob(ctx, upgrade, world.ClusterPrimary, config.FetchKeyCommand(&world.Cluster.Spec)))) } @@ -448,7 +448,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, upgrade *v1beta1.PG if err == nil && upgradeJobComplete && !removeDataJobsComplete { for _, sts := range world.ClusterReplicas { if err == nil { - err = r.apply(ctx, r.generateRemoveDataJob(ctx, upgrade, sts)) + err = runtime.Apply(ctx, r.Writer, r.generateRemoveDataJob(ctx, upgrade, sts)) } } } diff --git a/internal/controller/postgrescluster/apply.go b/internal/controller/postgrescluster/apply.go index ab7c23871..22aa1d3ce 100644 --- a/internal/controller/postgrescluster/apply.go +++ b/internal/controller/postgrescluster/apply.go @@ -6,13 +6,10 @@ package postgrescluster import ( "context" - "reflect" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" ) // apply sends an apply patch to object's endpoint in the Kubernetes API and @@ -21,41 +18,5 @@ import ( // - https://docs.k8s.io/reference/using-api/server-side-apply/#managers // - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts func (r *Reconciler) apply(ctx context.Context, object client.Object) error { - // Generate an apply-patch by comparing the object to its zero value. - zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() - data, err := client.MergeFrom(zero.(client.Object)).Data(object) - apply := client.RawPatch(client.Apply.Type(), data) - - // Keep a copy of the object before any API calls. - intent := object.DeepCopyObject() - patch := kubeapi.NewJSONPatch() - - // Send the apply-patch with force=true. - if err == nil { - err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership) - } - - // Some fields cannot be server-side applied correctly. When their outcome - // does not match the intent, send a json-patch to get really specific. - switch actual := object.(type) { - case *corev1.Service: - applyServiceSpec(patch, actual.Spec, intent.(*corev1.Service).Spec, "spec") - } - - // Send the json-patch when necessary. - if err == nil && !patch.IsEmpty() { - err = r.Writer.Patch(ctx, object, patch) - } - return err -} - -// applyServiceSpec is called by Reconciler.apply to work around issues -// with server-side apply. 
-func applyServiceSpec( - patch *kubeapi.JSON6902, actual, intent corev1.ServiceSpec, path ...string, -) { - // Service.Spec.Selector cannot be unset; perhaps https://issue.k8s.io/117447 - if !equality.Semantic.DeepEqual(actual.Selector, intent.Selector) { - patch.Replace(append(path, "selector")...)(intent.Selector) - } + return runtime.Apply(ctx, r.Writer, object) } diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go index fc814259b..e73b1701f 100644 --- a/internal/controller/postgrescluster/controller_ref_manager.go +++ b/internal/controller/postgrescluster/controller_ref_manager.go @@ -16,7 +16,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -31,7 +31,7 @@ func (r *Reconciler) adoptObject(ctx context.Context, postgresCluster *v1beta1.P return err } - patchBytes, err := kubeapi.NewMergePatch(). + patchBytes, err := runtime.NewMergePatch(). Add("metadata", "ownerReferences")(obj.GetOwnerReferences()).Bytes() if err != nil { return err @@ -160,8 +160,8 @@ func (r *Reconciler) manageControllerRefs(ctx context.Context, func (r *Reconciler) releaseObject(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, obj client.Object) error { - // TODO create a strategic merge type in kubeapi instead of using Merge7386 - patch, err := kubeapi.NewMergePatch(). + // TODO create a strategic merge type instead of using Merge7386 + patch, err := runtime.NewMergePatch(). Add("metadata", "ownerReferences")([]map[string]string{{ "$patch": "delete", "uid": string(postgresCluster.GetUID()), diff --git a/internal/controller/runtime/apply.go b/internal/controller/runtime/apply.go new file mode 100644 index 000000000..18926488e --- /dev/null +++ b/internal/controller/runtime/apply.go @@ -0,0 +1,58 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Apply sends an apply patch with force=true using cc and updates object with any returned content. +// The client is responsible for setting fieldManager; see [client.WithFieldOwner]. +// +// - https://docs.k8s.io/reference/using-api/server-side-apply#managers +// - https://docs.k8s.io/reference/using-api/server-side-apply#conflicts +func Apply[ + // NOTE: This interface can go away following https://go.dev/issue/47487. + ClientPatch interface { + Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error + }, + T interface{ client.Object }, +](ctx context.Context, cc ClientPatch, object T) error { + // Generate an apply-patch by comparing the object to its zero value. + data, err := client.MergeFrom(*new(T)).Data(object) + apply := client.RawPatch(client.Apply.Type(), data) + + // Keep a copy of the object before any API calls. + intent := object.DeepCopyObject() + + // Send the apply-patch with force=true. 
+ if err == nil { + err = cc.Patch(ctx, object, apply, client.ForceOwnership) + } + + // Some fields cannot be server-side applied correctly. + // When their outcome does not match the intent, send a json-patch to get really specific. + patch := NewJSONPatch() + + switch actual := any(object).(type) { + case *corev1.Service: + intent := intent.(*corev1.Service) + + // Service.Spec.Selector cannot be unset; perhaps https://issue.k8s.io/117447 + if !equality.Semantic.DeepEqual(actual.Spec.Selector, intent.Spec.Selector) { + patch.Replace("spec", "selector")(intent.Spec.Selector) + } + } + + // Send the json-patch when necessary. + if err == nil && !patch.IsEmpty() { + err = cc.Patch(ctx, object, patch) + } + return err +} diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/runtime/apply_test.go similarity index 90% rename from internal/controller/postgrescluster/apply_test.go rename to internal/controller/runtime/apply_test.go index 413ffbd13..c6f182dde 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/runtime/apply_test.go @@ -2,10 +2,9 @@ // // SPDX-License-Identifier: Apache-2.0 -package postgrescluster +package runtime_test import ( - "context" "errors" "regexp" "strings" @@ -23,17 +22,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/testing/require" ) func TestServerSideApply(t *testing.T) { - ctx := context.Background() - cfg, cc := setupKubernetes(t) + ctx := t.Context() + config, base := require.Kubernetes2(t) require.ParallelCapacity(t, 0) - ns := setupNamespace(t, cc) + ns := require.Namespace(t, base) - dc, err := discovery.NewDiscoveryClientForConfig(cfg) + dc, err := discovery.NewDiscoveryClientForConfig(config) assert.NilError(t, err) server, err := dc.ServerVersion() @@ -43,8 +43,7 @@ func TestServerSideApply(t *testing.T) { assert.NilError(t, err) t.Run("ObjectMeta", func(t *testing.T) { - cc := client.WithFieldOwner(cc, t.Name()) - reconciler := Reconciler{Writer: cc} + cc := client.WithFieldOwner(base, t.Name()) constructor := func() *corev1.ConfigMap { var cm corev1.ConfigMap cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) @@ -78,17 +77,16 @@ func TestServerSideApply(t *testing.T) { assert.Assert(t, after.GetResourceVersion() == before.GetResourceVersion()) } - // Our apply method generates the correct apply-patch. + // Our [runtime.Apply] generates the correct apply-patch. again := constructor() - assert.NilError(t, reconciler.apply(ctx, again)) + assert.NilError(t, runtime.Apply(ctx, cc, again)) assert.Assert(t, again.GetResourceVersion() != "") assert.Assert(t, again.GetResourceVersion() == after.GetResourceVersion(), "expected to correctly no-op") }) t.Run("ControllerReference", func(t *testing.T) { - cc := client.WithFieldOwner(cc, t.Name()) - reconciler := Reconciler{Writer: cc} + cc := client.WithFieldOwner(base, t.Name()) // Setup two possible controllers. controller1 := new(corev1.ConfigMap) @@ -128,8 +126,8 @@ func TestServerSideApply(t *testing.T) { assert.Assert(t, len(status.ErrStatus.Details.Causes) != 0) assert.Equal(t, status.ErrStatus.Details.Causes[0].Field, "metadata.ownerReferences") - // Try to change the controller using our apply method. 
- err2 := reconciler.apply(ctx, applied) + // Try to change the controller using our [runtime.Apply]. + err2 := runtime.Apply(ctx, cc, applied) // Same result; patch not accepted. assert.DeepEqual(t, err1, err2, @@ -162,8 +160,7 @@ func TestServerSideApply(t *testing.T) { {"empty", make(map[string]string)}, } { t.Run(tt.name, func(t *testing.T) { - cc := client.WithFieldOwner(cc, t.Name()) - reconciler := Reconciler{Writer: cc} + cc := client.WithFieldOwner(base, t.Name()) intent := constructor(tt.name + "-selector") intent.Spec.Selector = tt.selector @@ -190,9 +187,9 @@ func TestServerSideApply(t *testing.T) { assert.Assert(t, len(after.Spec.Selector) != len(intent.Spec.Selector), "got %v", after.Spec.Selector) - // Our apply method corrects it. + // Our [runtime.Apply] corrects it. again := intent.DeepCopy() - assert.NilError(t, reconciler.apply(ctx, again)) + assert.NilError(t, runtime.Apply(ctx, cc, again)) assert.Assert(t, equality.Semantic.DeepEqual(again.Spec.Selector, intent.Spec.Selector), "\n--- again.Spec.Selector\n+++ intent.Spec.Selector\n%v", diff --git a/internal/controller/runtime/client.go b/internal/controller/runtime/client.go index c41fe5a9c..1bdbdddd1 100644 --- a/internal/controller/runtime/client.go +++ b/internal/controller/runtime/client.go @@ -76,6 +76,7 @@ func (fn ClientUpdate) Update(ctx context.Context, obj client.Object, opts ...cl return fn(ctx, obj, opts...) } +// WarningHandler implements [rest.WarningHandler] and [rest.WarningHandlerWithContext] as a single function. type WarningHandler func(ctx context.Context, code int, agent string, text string) func (fn WarningHandler) HandleWarningHeader(code int, agent string, text string) { diff --git a/internal/controller/runtime/conversion.go b/internal/controller/runtime/conversion.go index ae4495e86..57f7938f3 100644 --- a/internal/controller/runtime/conversion.go +++ b/internal/controller/runtime/conversion.go @@ -50,7 +50,7 @@ func FromUnstructuredObject[ FromUnstructured(object.UnstructuredContent(), result) } -// ToUnstructuredList returns a copy of list by marshaling through JSON. +// ToUnstructuredList returns a copy of list using reflection. func ToUnstructuredList(list client.ObjectList) (*unstructured.UnstructuredList, error) { content, err := runtime. DefaultUnstructuredConverter. @@ -61,7 +61,7 @@ func ToUnstructuredList(list client.ObjectList) (*unstructured.UnstructuredList, return result, err } -// ToUnstructuredObject returns a copy of object by marshaling through JSON. +// ToUnstructuredObject returns a copy of object using reflection. func ToUnstructuredObject(object client.Object) (*unstructured.Unstructured, error) { content, err := runtime. DefaultUnstructuredConverter. 
diff --git a/internal/kubeapi/patch.go b/internal/controller/runtime/patch.go similarity index 99% rename from internal/kubeapi/patch.go rename to internal/controller/runtime/patch.go index 95bcc9a6e..955b93e1d 100644 --- a/internal/kubeapi/patch.go +++ b/internal/controller/runtime/patch.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package kubeapi +package runtime import ( "strings" diff --git a/internal/kubeapi/patch_test.go b/internal/controller/runtime/patch_test.go similarity index 99% rename from internal/kubeapi/patch_test.go rename to internal/controller/runtime/patch_test.go index 05bd14006..07092be06 100644 --- a/internal/kubeapi/patch_test.go +++ b/internal/controller/runtime/patch_test.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package kubeapi +package runtime import ( "encoding/json" diff --git a/internal/controller/standalone_pgadmin/apply.go b/internal/controller/standalone_pgadmin/apply.go deleted file mode 100644 index 23df91192..000000000 --- a/internal/controller/standalone_pgadmin/apply.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package standalone_pgadmin - -import ( - "context" - "reflect" - - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// apply sends an apply patch to object's endpoint in the Kubernetes API and -// updates object with any returned content. The fieldManager is set by -// r.Writer and the force parameter is true. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts -// -// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. -func (r *PGAdminReconciler) apply(ctx context.Context, object client.Object) error { - // Generate an apply-patch by comparing the object to its zero value. - zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() - data, err := client.MergeFrom(zero.(client.Object)).Data(object) - apply := client.RawPatch(client.Apply.Type(), data) - - // Send the apply-patch with force=true. 
- if err == nil { - err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership) - } - - return err -} diff --git a/internal/controller/standalone_pgadmin/configmap.go b/internal/controller/standalone_pgadmin/configmap.go index d2378802c..95c0bd9be 100644 --- a/internal/controller/standalone_pgadmin/configmap.go +++ b/internal/controller/standalone_pgadmin/configmap.go @@ -19,6 +19,7 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/collector" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -43,7 +44,7 @@ func (r *PGAdminReconciler) reconcilePGAdminConfigMap( err = errors.WithStack(r.setControllerReference(pgadmin, configmap)) } if err == nil { - err = errors.WithStack(r.apply(ctx, configmap)) + err = errors.WithStack(runtime.Apply(ctx, r.Writer, configmap)) } return configmap, err diff --git a/internal/controller/standalone_pgadmin/service.go b/internal/controller/standalone_pgadmin/service.go index 8f21da476..43835b31d 100644 --- a/internal/controller/standalone_pgadmin/service.go +++ b/internal/controller/standalone_pgadmin/service.go @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -100,7 +101,7 @@ func (r *PGAdminReconciler) reconcilePGAdminService( return err } - return errors.WithStack(r.apply(ctx, service)) + return errors.WithStack(runtime.Apply(ctx, r.Writer, service)) } // If we get here then ServiceName was not provided through the spec diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go index 8e507acda..a431ad5d3 100644 --- a/internal/controller/standalone_pgadmin/statefulset.go +++ b/internal/controller/standalone_pgadmin/statefulset.go @@ -16,6 +16,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/collector" "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/util" @@ -55,7 +56,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( if err := errors.WithStack(r.setControllerReference(pgadmin, sts)); err != nil { return err } - return errors.WithStack(r.apply(ctx, sts)) + return errors.WithStack(runtime.Apply(ctx, r.Writer, sts)) } // statefulset defines the StatefulSet needed to run pgAdmin. 
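
The refactor in this patch replaces each controller's private apply method with
the shared runtime.Apply helper added above. A minimal sketch of the calling
convention, assuming a controller-runtime client; the function and object names
below are illustrative and not part of the patch:

    package example

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"
    	"sigs.k8s.io/controller-runtime/pkg/client"

    	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
    )

    // applyConfigMap shows the calling convention: the field manager comes
    // from the client, via client.WithFieldOwner, not from Apply itself.
    func applyConfigMap(ctx context.Context, cc client.Client) error {
    	writer := client.WithFieldOwner(cc, "example-manager")

    	var cm corev1.ConfigMap
    	cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
    	cm.Namespace, cm.Name = "default", "example"
    	cm.Data = map[string]string{"key": "value"}

    	// Sends an apply-patch with force=true and updates cm with any
    	// returned content.
    	return runtime.Apply(ctx, writer, &cm)
    }

Apply takes its writer as a type parameter because Go methods cannot be generic
(go.dev/issue/47487); any value with a compatible Patch method works, which lets
every reconciler that previously carried its own copy call this one helper.
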
diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go index 959437762..e89705b63 100644 --- a/internal/controller/standalone_pgadmin/users.go +++ b/internal/controller/standalone_pgadmin/users.go @@ -18,6 +18,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -297,7 +298,7 @@ cd $PGADMIN_DIR err = errors.WithStack(r.setControllerReference(pgadmin, intentUserSecret)) if err == nil { - err = errors.WithStack(r.apply(ctx, intentUserSecret)) + err = errors.WithStack(runtime.Apply(ctx, r.Writer, intentUserSecret)) } return err diff --git a/internal/controller/standalone_pgadmin/volume.go b/internal/controller/standalone_pgadmin/volume.go index a3e26682e..a4d0a5e13 100644 --- a/internal/controller/standalone_pgadmin/volume.go +++ b/internal/controller/standalone_pgadmin/volume.go @@ -14,6 +14,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -32,7 +33,7 @@ func (r *PGAdminReconciler) reconcilePGAdminDataVolume( if err == nil { err = r.handlePersistentVolumeClaimError(pgadmin, - errors.WithStack(r.apply(ctx, pvc))) + errors.WithStack(runtime.Apply(ctx, r.Writer, pvc))) } return pvc, err From 5a08d756cbe199d58be67e292287322c2ecd5093 Mon Sep 17 00:00:00 2001 From: Benjamin Blattberg Date: Thu, 25 Sep 2025 11:23:52 -0500 Subject: [PATCH 20/43] Update LICENSE.txt (#4303) --- licenses/LICENSE.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/licenses/LICENSE.txt b/licenses/LICENSE.txt index e799dc320..57f177367 100644 --- a/licenses/LICENSE.txt +++ b/licenses/LICENSE.txt @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2017 - 2024 Crunchy Data Solutions, Inc. + Copyright 2017 - 2025 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From 313f2a7653d440b3fbcc92c3c3f82dcfe1eb3ca3 Mon Sep 17 00:00:00 2001 From: Benjamin Blattberg Date: Fri, 26 Sep 2025 10:54:41 -0500 Subject: [PATCH 21/43] Update Makefile (#4301) Our Dockerfile expects to find the pgmonitor queries so we should run get-pgmonitor as part of the build target --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 5f199818b..7557fbcc1 100644 --- a/Makefile +++ b/Makefile @@ -132,6 +132,7 @@ deploy-dev: createnamespaces .PHONY: build build: ## Build a postgres-operator image +build: get-pgmonitor $(BUILDAH) build --tag localhost/postgres-operator \ --label org.opencontainers.image.authors='Crunchy Data' \ --label org.opencontainers.image.description='Crunchy PostgreSQL Operator' \ From c8a6ba2cd3da39f7db8b6b6f602c10017bf187c7 Mon Sep 17 00:00:00 2001 From: Drew Sessler Date: Tue, 23 Sep 2025 11:49:00 -0700 Subject: [PATCH 22/43] Add ImageVolumeSource to AdditionalVolumes. 
Add validation to ensure that user must choose between an image volume source and a pvc claim. Add validation to ensure that user cannot set readOnly to false when using an image volume source. Add validation to ensure that user sets a reference when using an image volume source. Add tests for using ImageVolumeSource in AdditionalVolume feature. --- ...res-operator.crunchydata.com_pgadmins.yaml | 30 +- ...ator.crunchydata.com_postgresclusters.yaml | 460 +++++++++++++++++- .../validation/postgrescluster_test.go | 165 +++++++ internal/util/volumes.go | 8 +- internal/util/volumes_test.go | 54 ++ .../v1beta1/shared_types.go | 17 +- .../v1beta1/shared_types_test.go | 19 + .../v1beta1/zz_generated.deepcopy.go | 5 + 8 files changed, 740 insertions(+), 18 deletions(-) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index c729da25e..d55e4d24b 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -2620,6 +2620,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -2633,10 +2654,17 @@ spec: read-write. Defaults to false. 
type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using an ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' + - message: if using an ImageVolumeSource, you must set a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index ce139e4c7..3fef71364 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1567,6 +1567,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -1580,10 +1601,20 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using + an ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' + - message: if using an ImageVolumeSource, you must + set a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -2999,6 +3030,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. 
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -3012,10 +3064,20 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using + an ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' + - message: if using an ImageVolumeSource, you must + set a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -4434,6 +4496,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -4447,10 +4530,20 @@ spec: otherwise read-write. Defaults to false. 
type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using + an ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' + - message: if using an ImageVolumeSource, you must + set a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -6836,6 +6929,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -6849,10 +6963,20 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using an + ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' + - message: if using an ImageVolumeSource, you must set + a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -7952,6 +8076,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. 
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -7965,10 +8110,20 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using an + ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' + - message: if using an ImageVolumeSource, you must set + a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -11273,6 +11428,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -11286,10 +11462,19 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using an + ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' + - message: if using an ImageVolumeSource, you must set + a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -16283,6 +16468,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. 
Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -16296,10 +16502,20 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using an + ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' + - message: if using an ImageVolumeSource, you must set + a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -20424,6 +20640,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -20437,10 +20674,20 @@ spec: otherwise read-write. Defaults to false. 
type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using + an ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' + - message: if using an ImageVolumeSource, you must + set a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -21856,6 +22103,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -21869,10 +22137,20 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using + an ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' + - message: if using an ImageVolumeSource, you must + set a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -23291,6 +23569,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. 
+ Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -23304,10 +23603,20 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using + an ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' + - message: if using an ImageVolumeSource, you must + set a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -25671,6 +25980,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -25684,10 +26014,20 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using an + ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' + - message: if using an ImageVolumeSource, you must set + a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -26787,6 +27127,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. 
Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -26800,10 +27161,20 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using an + ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' + - message: if using an ImageVolumeSource, you must set + a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -30108,6 +30479,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -30121,10 +30513,19 @@ spec: otherwise read-write. Defaults to false. 
type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using an + ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' + - message: if using an ImageVolumeSource, you must set + a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -35117,6 +35518,27 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: Details for adding an image volume + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object name: description: |- The name of the directory in which to mount this volume. @@ -35130,10 +35552,20 @@ spec: otherwise read-write. Defaults to false. 
type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: readOnly cannot be set false when using an + ImageVolumeSource + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' + - message: if using an ImageVolumeSource, you must set + a reference + rule: '!has(self.image) || (self.?image.reference.hasValue() + && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: diff --git a/internal/testing/validation/postgrescluster_test.go b/internal/testing/validation/postgrescluster_test.go index a4c052ee8..7060a7933 100644 --- a/internal/testing/validation/postgrescluster_test.go +++ b/internal/testing/validation/postgrescluster_test.go @@ -9,7 +9,10 @@ import ( "testing" "gotest.tools/v3/assert" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/testing/require" v1 "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1" @@ -103,3 +106,165 @@ func TestPostgresUserInterfaceAcrossVersions(t *testing.T) { "userInterface not available in v1") }) } + +func TestAdditionalVolumes(t *testing.T) { + ctx := context.Background() + cc := require.KubernetesAtLeast(t, "1.30") + t.Parallel() + + namespace := require.Namespace(t, cc) + base := v1beta1.NewPostgresCluster() + + base.Namespace = namespace.Name + base.Name = "image-volume-source-test" + // required fields + require.UnmarshalInto(t, &base.Spec, `{ + postgresVersion: 16, + instances: [{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }], + }`) + + assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), + "expected this base to be valid") + + var unstructuredBase unstructured.Unstructured + require.UnmarshalInto(t, &unstructuredBase, require.Value(yaml.Marshal(base))) + + t.Run("Cannot set both image and claimName", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + require.UnmarshalIntoField(t, tmp, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim", + image: { + reference: "test-image", + pullPolicy: Always + }, + readOnly: true + }] + } + }]`, "spec", "instances") + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "you must set only one of image or claimName") + }) + + t.Run("Cannot set readOnly to false when using image volume", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + require.UnmarshalIntoField(t, tmp, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + image: { + reference: "test-image", + pullPolicy: Always + }, + readOnly: false + }] + } + }]`, "spec", "instances") + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "readOnly cannot be set false when using an ImageVolumeSource") + }) + + t.Run("Reference must be set when using image volume", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + 
require.UnmarshalIntoField(t, tmp, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + image: { + pullPolicy: Always + }, + readOnly: true + }] + } + }]`, "spec", "instances") + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "if using an ImageVolumeSource, you must set a reference") + }) + + t.Run("Reference cannot be an empty string when using image volume", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + require.UnmarshalIntoField(t, tmp, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + image: { + reference: "", + pullPolicy: Always + }, + readOnly: true + }] + } + }]`, "spec", "instances") + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "if using an ImageVolumeSource, you must set a reference") + }) + + t.Run("ReadOnly can be omitted or set true when using image volume", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + require.UnmarshalIntoField(t, tmp, `[{ + name: "test-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + image: { + reference: "test-image", + pullPolicy: Always + }, + }] + } + }, { + name: "another-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "another", + image: { + reference: "another-image", + pullPolicy: Always + }, + readOnly: true + }] + } + }]`, "spec", "instances") + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll)) + }) +} diff --git a/internal/util/volumes.go b/internal/util/volumes.go index 4151eef76..9b550ad4a 100644 --- a/internal/util/volumes.go +++ b/internal/util/volumes.go @@ -58,7 +58,13 @@ func addVolumesAndMounts(pod *corev1.PodSpec, volumes []v1beta1.AdditionalVolume missingContainers := []string{} for _, spec := range volumes { - mount := namer(spec.Name, spec.ReadOnly) + // If it is an image volume, override readOnly to true + readOnly := spec.ReadOnly + if spec.Image != nil { + readOnly = true + } + + mount := namer(spec.Name, readOnly) pod.Volumes = append(pod.Volumes, spec.AsVolume(mount.Name)) // Create a set of all the requested containers, diff --git a/internal/util/volumes_test.go b/internal/util/volumes_test.go index ee5ebaff9..ff2d0e762 100644 --- a/internal/util/volumes_test.go +++ b/internal/util/volumes_test.go @@ -207,6 +207,60 @@ func TestAddAdditionalVolumesAndMounts(t *testing.T) { claimName: required readOnly: true`, expectedMissing: []string{}, + }, { + tcName: "image volumes - readOnly overridden true", + additionalVolumes: []v1beta1.AdditionalVolume{{ + Containers: []string{"database"}, + Image: &corev1.ImageVolumeSource{ + Reference: "some-image-name", + PullPolicy: corev1.PullAlways, + }, + Name: "required", + ReadOnly: true, + }, { + Image: &corev1.ImageVolumeSource{ + Reference: "another-image-name", + PullPolicy: corev1.PullAlways, + }, + Name: "other", + ReadOnly: false, + }}, + expectedContainers: `- name: database + resources: {} + volumeMounts: + - mountPath: /volumes/required + name: volumes-required + readOnly: true + - mountPath: /volumes/other + name: volumes-other + 
readOnly: true +- name: other + resources: {} + volumeMounts: + - mountPath: /volumes/other + name: volumes-other + readOnly: true`, + expectedInitContainers: `- name: startup + resources: {} + volumeMounts: + - mountPath: /volumes/other + name: volumes-other + readOnly: true +- name: config + resources: {} + volumeMounts: + - mountPath: /volumes/other + name: volumes-other + readOnly: true`, + expectedVolumes: `- image: + pullPolicy: Always + reference: some-image-name + name: volumes-required +- image: + pullPolicy: Always + reference: another-image-name + name: volumes-other`, + expectedMissing: []string{}, }} for _, tc := range testCases { diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index 48a192cb8..79c343524 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -311,14 +311,18 @@ func (meta *Metadata) GetAnnotationsOrNil() map[string]string { // Only one applier should be managing each volume definition. // https://docs.k8s.io/reference/using-api/server-side-apply#merge-strategy // +structType=atomic +// +// +kubebuilder:validation:XValidation:rule=`has(self.claimName) != has(self.image)`,message=`you must set only one of image or claimName` +// +kubebuilder:validation:XValidation:rule=`!has(self.image) || !has(self.readOnly) || self.readOnly`,message=`readOnly cannot be set false when using an ImageVolumeSource` +// +kubebuilder:validation:XValidation:rule=`!has(self.image) || (self.?image.reference.hasValue() && self.image.reference.size() > 0)`,message=`if using an ImageVolumeSource, you must set a reference` type AdditionalVolume struct { // Name of an existing PersistentVolumeClaim. // --- // https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/core/validation#ValidatePersistentVolumeClaim // https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/core/validation#ValidatePersistentVolumeName // - // +required - ClaimName DNS1123Subdomain `json:"claimName"` + // +optional + ClaimName DNS1123Subdomain `json:"claimName,omitempty"` // The names of containers in which to mount this volume. // The default mounts the volume in *all* containers. An empty list does not mount the volume to any containers. @@ -333,6 +337,13 @@ type AdditionalVolume struct { // +optional Containers []DNS1123Label `json:"containers"` + // Details for adding an image volume + // --- + // https://docs.k8s.io/concepts/storage/volumes#image + // + // +optional + Image *corev1.ImageVolumeSource `json:"image,omitempty"` + // The name of the directory in which to mount this volume. // Volumes are mounted in containers at `/volumes/{name}`. 
// --- @@ -366,6 +377,8 @@ func (in *AdditionalVolume) AsVolume(name string) corev1.Volume { ClaimName: in.ClaimName, ReadOnly: in.ReadOnly, } + case in.Image != nil: + out.Image = in.Image.DeepCopy() } return out diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go index 1185321fe..8a1f22be2 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go @@ -45,6 +45,25 @@ func TestAdditionalVolumeAsVolume(t *testing.T) { assert.DeepEqual(t, out, expected) }) }) + + t.Run("Image", func(t *testing.T) { + in := v1beta1.AdditionalVolume{Image: &corev1.ImageVolumeSource{ + Reference: "jkl;", + PullPolicy: corev1.PullAlways, + }} + out := in.AsVolume("asdf") + + var expected corev1.Volume + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: asdf, + image: { + reference: jkl;, + pullPolicy: Always, + }, + }`), &expected)) + + assert.DeepEqual(t, out, expected) + }) } func TestDurationAsDuration(t *testing.T) { diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index ac271ad54..2d1301c2d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -41,6 +41,11 @@ func (in *AdditionalVolume) DeepCopyInto(out *AdditionalVolume) { *out = make([]DNS1123Label, len(*in)) copy(*out, *in) } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(corev1.ImageVolumeSource) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolume. From 20ff72928548efa0a72c0d06063089589dba5eb2 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Mon, 15 Sep 2025 15:48:28 -0500 Subject: [PATCH 23/43] Add a Make target that runs crd-schema-checker This tool estimates the total and relative cost of CEL validation rules. See: https://github.com/openshift/crd-schema-checker --- Makefile | 13 +++++++++++++ hack/check-manifests.awk | 26 ++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 hack/check-manifests.awk diff --git a/Makefile b/Makefile index 7557fbcc1..e4e2cd4f6 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,7 @@ CONTROLLER ?= $(GO) tool sigs.k8s.io/controller-tools/cmd/controller-gen # Run tests using the latest tools. CHAINSAW ?= $(GO) run github.com/kyverno/chainsaw@latest CHAINSAW_TEST ?= $(CHAINSAW) test +CRD_CHECKER ?= $(GO) run github.com/openshift/crd-schema-checker/cmd/crd-schema-checker@latest ENVTEST ?= $(GO) run sigs.k8s.io/controller-runtime/tools/setup-envtest@latest KUTTL ?= $(GO) run github.com/kudobuilder/kuttl/cmd/kubectl-kuttl@latest KUTTL_TEST ?= $(KUTTL) test @@ -148,6 +149,12 @@ check: ## Run basic go tests with coverage output check: get-pgmonitor QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" $(GO_TEST) -cover ./... +# Informational only; no criteria to enforce at this time. 
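+# Usage: make check-crd
+# Each CRD in config/crd/bases is checked; output is filtered through
+# hack/check-manifests.awk, which shortens and colors the messages.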
+.PHONY: check-crd +check-crd: + $(foreach CRD, $(wildcard config/crd/bases/*.yaml), \ + $(CRD_CHECKER) check-manifests --new-crd-filename '$(CRD)' 2>&1 | awk -f hack/check-manifests.awk $(newline)) + # Available versions: curl -s 'https://storage.googleapis.com/kubebuilder-tools/' | grep -o '[^<]*' # - KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT=true .PHONY: check-envtest @@ -254,3 +261,9 @@ generate-rbac: ## Generate RBAC ) rbac:roleName='postgres-operator' $(\ ) paths='./cmd/...' paths='./internal/...' $(\ ) output:dir='config/rbac' # {directory}/role.yaml + +# https://www.gnu.org/software/make/manual/make.html#Multi_002dLine +define newline + + +endef diff --git a/hack/check-manifests.awk b/hack/check-manifests.awk new file mode 100644 index 000000000..0e6e23fff --- /dev/null +++ b/hack/check-manifests.awk @@ -0,0 +1,26 @@ +# Copyright 2025 Crunchy Data Solutions, Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +## TODO: Exit successfully only when there are no errors. +#/^ERROR:/ { rc = 1 } +#END { exit rc } + +# Shorten these frequent messages about validation rules. +/The maximum allowable value is 10000000[.]/ { + sub(/ The maximum allowable value is 10000000./, "") + sub(/ allowed budget/, "&, 10M") +} + +# These are informational, but "MustNot" sounds like something is wrong. +/^info: "MustNotExceedCostBudget"/ { + sub(/"MustNotExceedCostBudget"/, "\"CostBudget\"") +} + +# Color errors and warnings when attached to a terminal. +ENVIRON["MAKE_TERMOUT"] != "" { + sub(/^ERROR:/, "\033[0;31m&\033[0m") + sub(/^Warning:/, "\033[1;33m&\033[0m") +} + +{ print } From c06d3abf891c445343be4d9975035ff5d0c89e81 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Sep 2025 00:02:23 +0000 Subject: [PATCH 24/43] Bump the go-dependencies group across 1 directory with 3 updates Bumps the go-dependencies group with 3 updates in the / directory: [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo), [golang.org/x/crypto](https://github.com/golang/crypto) and [golang.org/x/tools](https://github.com/golang/tools). Updates `github.com/onsi/ginkgo/v2` from 2.25.2 to 2.25.3 - [Release notes](https://github.com/onsi/ginkgo/releases) - [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/ginkgo/compare/v2.25.2...v2.25.3) Updates `golang.org/x/crypto` from 0.41.0 to 0.42.0 - [Commits](https://github.com/golang/crypto/compare/v0.41.0...v0.42.0) Updates `golang.org/x/tools` from 0.36.0 to 0.37.0 - [Release notes](https://github.com/golang/tools/releases) - [Commits](https://github.com/golang/tools/compare/v0.36.0...v0.37.0) --- updated-dependencies: - dependency-name: github.com/onsi/ginkgo/v2 dependency-version: 2.25.3 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: go-dependencies - dependency-name: golang.org/x/crypto dependency-version: 0.42.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-dependencies - dependency-name: golang.org/x/tools dependency-version: 0.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-dependencies ... 
Signed-off-by: dependabot[bot] --- go.mod | 18 +++++++++--------- go.sum | 36 ++++++++++++++++++------------------ 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 0ec845635..3cdfaca91 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/google/uuid v1.6.0 github.com/itchyny/gojq v0.12.17 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 - github.com/onsi/ginkgo/v2 v2.25.2 + github.com/onsi/ginkgo/v2 v2.25.3 github.com/onsi/gomega v1.38.2 github.com/pganalyze/pg_query_go/v6 v6.1.0 github.com/pkg/errors v0.9.1 @@ -22,8 +22,8 @@ require ( go.opentelemetry.io/otel v1.38.0 go.opentelemetry.io/otel/sdk v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 - golang.org/x/crypto v0.41.0 - golang.org/x/tools v0.36.0 + golang.org/x/crypto v0.42.0 + golang.org/x/tools v0.37.0 gotest.tools/v3 v3.5.2 k8s.io/api v0.33.4 k8s.io/apimachinery v0.33.4 @@ -113,13 +113,13 @@ require ( go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.27.0 // indirect - golang.org/x/net v0.43.0 // indirect + golang.org/x/mod v0.28.0 // indirect + golang.org/x/net v0.44.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/term v0.34.0 // indirect - golang.org/x/text v0.28.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/term v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 757aed03f..baf3d4f93 100644 --- a/go.sum +++ b/go.sum @@ -124,8 +124,8 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= -github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= +github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/pganalyze/pg_query_go/v6 v6.1.0 h1:jG5ZLhcVgL1FAw4C/0VNQaVmX1SUJx71wBGdtTtBvls= @@ -250,31 +250,31 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.41.0 
h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -285,18 +285,18 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
+golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
+golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
+golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
+golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
 golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
 golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -304,8 +304,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
-golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
+golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
 golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
 golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
 golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=

From c9c2a533270874ac2905f3090e4338512e1149a0 Mon Sep 17 00:00:00 2001
From: TJ Moore
Date: Wed, 10 Sep 2025 03:21:23 -0400
Subject: [PATCH 25/43] Updates to support changes starting in pgAdmin 9.3

Changes in the flags used by pgAdmin's setup.py for user management
start in pgAdmin 9.3.
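
For illustration (abridged; the full invocation is assembled by the
reconciler code below), creating a non-admin user changes roughly from:

    python3 setup.py add-user --nonadmin ...

to:

    python3 setup.py add-user --role User ...
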
Issue: PGO-2686 --- ...res-operator.crunchydata.com_pgadmins.yaml | 4 + .../controller/standalone_pgadmin/users.go | 58 ++++++++---- .../standalone_pgadmin/users_test.go | 90 +++++++++++++------ .../v1beta1/standalone_pgadmin_types.go | 4 + .../01-assert.yaml | 8 +- .../03-assert.yaml | 10 ++- .../05-assert.yaml | 10 ++- .../07-assert.yaml | 10 ++- 8 files changed, 135 insertions(+), 59 deletions(-) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index d55e4d24b..85476b8db 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -2748,6 +2748,10 @@ spec: description: MajorVersion represents the major version of the running pgAdmin. type: integer + minorVersion: + description: MinorVersion represents the minor version of the running + pgAdmin. + type: string observedGeneration: description: observedGeneration represents the .metadata.generation on which the status was based. diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go index e89705b63..194f80ed5 100644 --- a/internal/controller/standalone_pgadmin/users.go +++ b/internal/controller/standalone_pgadmin/users.go @@ -80,28 +80,43 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * return nil } - // If the pgAdmin version is not in the status or the image SHA has changed, get - // the pgAdmin version and store it in the status. - var pgadminVersion int - if pgadmin.Status.MajorVersion == 0 || pgadmin.Status.ImageSHA != pgAdminImageSha { - pgadminVersion, err = r.reconcilePGAdminMajorVersion(ctx, podExecutor) + // If the pgAdmin major or minor version is not in the status or the image + // SHA has changed, get the pgAdmin version and store it in the status. + var pgadminMajorVersion int + if pgadmin.Status.MajorVersion == 0 || pgadmin.Status.MinorVersion == "" || + pgadmin.Status.ImageSHA != pgAdminImageSha { + + pgadminMinorVersion, err := r.reconcilePGAdminVersion(ctx, podExecutor) if err != nil { return err } - pgadmin.Status.MajorVersion = pgadminVersion + + // ensure minor version is valid before storing in status + parsedMinorVersion, err := strconv.ParseFloat(pgadminMinorVersion, 64) + if err != nil { + return err + } + + // Note: "When converting a floating-point number to an integer, the + // fraction is discarded (truncation towards zero)." + // - https://go.dev/ref/spec#Conversions + pgadminMajorVersion = int(parsedMinorVersion) + + pgadmin.Status.MinorVersion = pgadminMinorVersion + pgadmin.Status.MajorVersion = pgadminMajorVersion pgadmin.Status.ImageSHA = pgAdminImageSha } else { - pgadminVersion = pgadmin.Status.MajorVersion + pgadminMajorVersion = pgadmin.Status.MajorVersion } // If the pgAdmin version is not v8 or higher, return early as user management is // only supported for pgAdmin v8 and higher. - if pgadminVersion < 8 { + if pgadminMajorVersion < 8 { // If pgAdmin version is less than v8 and user management is being attempted, // log a message clarifying that it is only supported for pgAdmin v8 and higher. 
if len(pgadmin.Spec.Users) > 0 { log.Info("User management is only supported for pgAdmin v8 and higher.", - "pgadminVersion", pgadminVersion) + "pgadminVersion", pgadminMajorVersion) } return err } @@ -109,11 +124,11 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * return r.writePGAdminUsers(ctx, pgadmin, podExecutor) } -// reconcilePGAdminMajorVersion execs into the pgAdmin pod and retrieves the pgAdmin major version -func (r *PGAdminReconciler) reconcilePGAdminMajorVersion(ctx context.Context, exec Executor) (int, error) { +// reconcilePGAdminVersion execs into the pgAdmin pod and retrieves the pgAdmin minor version +func (r *PGAdminReconciler) reconcilePGAdminVersion(ctx context.Context, exec Executor) (string, error) { script := fmt.Sprintf(` PGADMIN_DIR=%s -cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)" +cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_VERSION)" `, pgAdminDir) var stdin, stdout, stderr bytes.Buffer @@ -122,10 +137,10 @@ cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)" []string{"bash", "-ceu", "--", script}...) if err != nil { - return 0, err + return "", err } - return strconv.Atoi(strings.TrimSpace(stdout.String())) + return strings.TrimSpace(stdout.String()), nil } // writePGAdminUsers takes the users in the pgAdmin spec and writes (adds or updates) their data @@ -171,10 +186,23 @@ cd $PGADMIN_DIR for _, user := range existingUsersArr { existingUsersMap[user.Username] = user } + + var olderThan9_3 bool + versionFloat, err := strconv.ParseFloat(pgadmin.Status.MinorVersion, 32) + if err != nil { + return err + } + if versionFloat < 9.3 { + olderThan9_3 = true + } + intentUsers := []pgAdminUserForJson{} for _, user := range pgadmin.Spec.Users { var stdin, stdout, stderr bytes.Buffer - typeFlag := "--nonadmin" + typeFlag := "--role User" + if olderThan9_3 { + typeFlag = "--nonadmin" + } isAdmin := false if user.Role == "Administrator" { typeFlag = "--admin" diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go index 5ec58dc57..3637e4993 100644 --- a/internal/controller/standalone_pgadmin/users_test.go +++ b/internal/controller/standalone_pgadmin/users_test.go @@ -110,15 +110,16 @@ func TestReconcilePGAdminUsers(t *testing.T) { assert.Equal(t, namespace, pgadmin.Namespace) assert.Equal(t, container, naming.ContainerPGAdmin) - // Simulate a v7 version of pgAdmin by setting stdout to "7" for - // podexec call in reconcilePGAdminMajorVersion - _, _ = stdout.Write([]byte("7")) + // Simulate a v7.1 version of pgAdmin by setting stdout to "7.1" + // for podexec call in reconcilePGAdminVersion + _, _ = stdout.Write([]byte("7.1")) return nil } assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) assert.Equal(t, calls, 1, "PodExec should be called once") assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.MinorVersion, "7.1") assert.Equal(t, pgadmin.Status.ImageSHA, "fakeSHA") }) @@ -145,20 +146,58 @@ func TestReconcilePGAdminUsers(t *testing.T) { ) error { calls++ - // Simulate a v7 version of pgAdmin by setting stdout to "7" for - // podexec call in reconcilePGAdminMajorVersion - _, _ = stdout.Write([]byte("7")) + // Simulate a v7.1 version of pgAdmin by setting stdout to "7.1" + // for podexec call in reconcilePGAdminVersion + _, _ = stdout.Write([]byte("7.1")) return nil } assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) assert.Equal(t, calls, 1, "PodExec should be 
called once") assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.MinorVersion, "7.1") assert.Equal(t, pgadmin.Status.ImageSHA, "newFakeSHA") }) + + t.Run("PodHealthyBadVersion", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pod := pod.DeepCopy() + + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "fakeSHA" + + r := new(PGAdminReconciler) + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( + ctx context.Context, namespace, pod, container string, + stdin io.Reader, stdout, stderr io.Writer, command ...string, + ) error { + calls++ + + assert.Equal(t, pod, "pgadmin-123-0") + assert.Equal(t, namespace, pgadmin.Namespace) + assert.Equal(t, container, naming.ContainerPGAdmin) + + // set expected version to something completely wrong + _, _ = stdout.Write([]byte("woot")) + return nil + } + + assert.ErrorContains(t, r.reconcilePGAdminUsers(ctx, pgadmin), "strconv.ParseFloat: parsing \"woot\": invalid syntax") + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 0) + assert.Equal(t, pgadmin.Status.MinorVersion, "") + assert.Equal(t, pgadmin.Status.ImageSHA, "") + }) } -func TestReconcilePGAdminMajorVersion(t *testing.T) { +func TestReconcilePGAdminVersion(t *testing.T) { ctx := context.Background() pod := corev1.Pod{} pod.Namespace = "test-namespace" @@ -180,30 +219,15 @@ func TestReconcilePGAdminMajorVersion(t *testing.T) { assert.Equal(t, namespace, "test-namespace") assert.Equal(t, container, naming.ContainerPGAdmin) - // Simulate a v7 version of pgAdmin by setting stdout to "7" for - // podexec call in reconcilePGAdminMajorVersion - _, _ = stdout.Write([]byte("7")) + // Simulate a v9.3 version of pgAdmin by setting stdout to "9.3" + // for podexec call in reconcilePGAdminVersion + _, _ = stdout.Write([]byte("9.3")) return nil } - version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) + version, err := reconciler.reconcilePGAdminVersion(ctx, podExecutor) assert.NilError(t, err) - assert.Equal(t, version, 7) - }) - - t.Run("FailedRetrieval", func(t *testing.T) { - reconciler.PodExec = func( - ctx context.Context, namespace, pod, container string, - stdin io.Reader, stdout, stderr io.Writer, command ...string, - ) error { - // Simulate the python call giving bad data (not a version int) - _, _ = stdout.Write([]byte("asdfjkl;")) - return nil - } - - version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) - assert.Check(t, err != nil) - assert.Equal(t, version, 0) + assert.Equal(t, version, "9.3") }) t.Run("PodExecError", func(t *testing.T) { @@ -214,9 +238,9 @@ func TestReconcilePGAdminMajorVersion(t *testing.T) { return errors.New("PodExecError") } - version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) + version, err := reconciler.reconcilePGAdminVersion(ctx, podExecutor) assert.Check(t, err != nil) - assert.Equal(t, version, 0) + assert.Equal(t, version, "") }) } @@ -244,6 +268,14 @@ func TestWritePGAdminUsers(t *testing.T) { }`) assert.NilError(t, cc.Create(ctx, pgadmin)) + // fake the status so that the correct commands will be used when creating + // users. 
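+	// MinorVersion "9.3" exercises the newer "--role User" flag path rather
+	// than the pre-9.3 "--nonadmin" flag.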
+ pgadmin.Status = v1beta1.PGAdminStatus{ + ImageSHA: "fakesha", + MajorVersion: 9, + MinorVersion: "9.3", + } + userPasswordSecret1 := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "user-password-secret1", diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go index 4b88f1272..e1147eb3d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go @@ -231,6 +231,10 @@ type PGAdminStatus struct { // +optional MajorVersion int `json:"majorVersion,omitempty"` + // MinorVersion represents the minor version of the running pgAdmin. + // +optional + MinorVersion string `json:"minorVersion,omitempty"` + // observedGeneration represents the .metadata.generation on which the status was based. // +optional // +kubebuilder:validation:Minimum=0 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml index 244533b7e..029033914 100644 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml @@ -6,12 +6,14 @@ commands: pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + # /usr/local/lib/python3.11/site-packages/pgadmin4 allows for various Python versions to be referenced in testing users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + bob_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="dave@example.com") | .role') - [ $bob_role = 1 ] && [ $dave_role = 2 ] || exit 1 + # Prior to pgAdmin 9.3, the role values were integers rather than strings. This supports both variations. 
+ ( [ $bob_role = 1 ] && [ $dave_role = 2 ] ) || ( [ $bob_role = "Administrator" ] && [ $dave_role = "User" ] ) || exit 1 users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml index 01aff25b3..00c3d819f 100644 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml @@ -6,13 +6,15 @@ commands: pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + # /usr/local/lib/python3.11/site-packages/pgadmin4 allows for various Python versions to be referenced in testing users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') - jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + bob_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="jimi@example.com") | .role') - [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + # Prior to pgAdmin 9.3, the role values were integers rather than strings. This supports both variations. 
+ ( [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] ) || ( [ $bob_role = "Administrator" ] && [ $dave_role = "Administrator" ] && [ $jimi_role = "User" ] ) || exit 1 users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml index 1dca13a7b..f6eb83b2d 100644 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml @@ -6,13 +6,15 @@ commands: pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + # /usr/local/lib/python3.11/site-packages/pgadmin4 allows for various Python versions to be referenced in testing users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') - jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + bob_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="jimi@example.com") | .role') - [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + # Prior to pgAdmin 9.3, the role values were integers rather than strings. This supports both variations. 
( [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] ) || ( [ $bob_role = "Administrator" ] && [ $dave_role = "Administrator" ] && [ $jimi_role = "User" ] ) || exit 1
 
       users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d)
diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml
index 5c0e7267e..3e3d8396b 100644
--- a/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml
+++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml
@@ -6,13 +6,15 @@ commands:
       pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name)
       secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name)
 
+      # /usr/local/lib/python3.11/site-packages/pgadmin4 allows for various Python versions to be referenced in testing
       users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json")
 
-      bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role')
-      dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role')
-      jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role')
+      bob_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="bob@example.com") | .role')
+      dave_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="dave@example.com") | .role')
+      jimi_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="jimi@example.com") | .role')
 
-      [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1
+      # Prior to pgAdmin 9.3, the role values were integers rather than strings. This supports both variations.
+      ( [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] ) || ( [ $bob_role = "Administrator" ] && [ $dave_role = "Administrator" ] && [ $jimi_role = "User" ] ) || exit 1
 
       users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d)

From 0732b4825f3a0acdc7fbceff74891887dd40b49e Mon Sep 17 00:00:00 2001
From: TJ Moore
Date: Fri, 26 Sep 2025 11:37:59 -0400
Subject: [PATCH 26/43] Handle pgAdmin UserWarning

Capture an expected user warning for pgAdmin 9.8 using Python 3.11 and
log it as an INFO message rather than an ERROR, which would short-circuit
user creation and updating.
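
For reference, the stderr being matched contains this substring (any
other non-empty stderr is still logged as an error):

    UserWarning: pkg_resources is deprecated as an API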
--- internal/controller/standalone_pgadmin/users.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go index 194f80ed5..3a08814fa 100644 --- a/internal/controller/standalone_pgadmin/users.go +++ b/internal/controller/standalone_pgadmin/users.go @@ -258,6 +258,8 @@ cd $PGADMIN_DIR log.Error(err, "PodExec failed: ") intentUsers = append(intentUsers, existingUser) continue + } else if strings.Contains(strings.TrimSpace(stderr.String()), "UserWarning: pkg_resources is deprecated as an API") { + log.Info(stderr.String()) } else if strings.TrimSpace(stderr.String()) != "" { log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", intentUser.Username)) @@ -292,7 +294,9 @@ cd $PGADMIN_DIR log.Error(err, "PodExec failed: ") continue } - if strings.TrimSpace(stderr.String()) != "" { + if strings.Contains(strings.TrimSpace(stderr.String()), "UserWarning: pkg_resources is deprecated as an API") { + log.Info(stderr.String()) + } else if strings.TrimSpace(stderr.String()) != "" { log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", intentUser.Username)) continue From e395e2ad815642bc13b16b7fd043ea49b726319f Mon Sep 17 00:00:00 2001 From: TJ Moore Date: Sat, 27 Sep 2025 19:30:08 -0400 Subject: [PATCH 27/43] Remove reconcilePGAdminVersion, adjust tests and add clarifying comments --- .../controller/standalone_pgadmin/users.go | 46 ++++++++------- .../standalone_pgadmin/users_test.go | 56 ++++++++----------- 2 files changed, 44 insertions(+), 58 deletions(-) diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go index 3a08814fa..e66ee43ea 100644 --- a/internal/controller/standalone_pgadmin/users.go +++ b/internal/controller/standalone_pgadmin/users.go @@ -86,11 +86,21 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * if pgadmin.Status.MajorVersion == 0 || pgadmin.Status.MinorVersion == "" || pgadmin.Status.ImageSHA != pgAdminImageSha { - pgadminMinorVersion, err := r.reconcilePGAdminVersion(ctx, podExecutor) - if err != nil { + // exec into the pgAdmin pod and retrieve the pgAdmin minor version + script := fmt.Sprintf(` +PGADMIN_DIR=%s +cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_VERSION)" +`, pgAdminDir) + + var stdin, stdout, stderr bytes.Buffer + + if err := podExecutor(ctx, &stdin, &stdout, &stderr, + []string{"bash", "-ceu", "--", script}...); err != nil { return err } + pgadminMinorVersion := strings.TrimSpace(stdout.String()) + // ensure minor version is valid before storing in status parsedMinorVersion, err := strconv.ParseFloat(pgadminMinorVersion, 64) if err != nil { @@ -124,25 +134,6 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * return r.writePGAdminUsers(ctx, pgadmin, podExecutor) } -// reconcilePGAdminVersion execs into the pgAdmin pod and retrieves the pgAdmin minor version -func (r *PGAdminReconciler) reconcilePGAdminVersion(ctx context.Context, exec Executor) (string, error) { - script := fmt.Sprintf(` -PGADMIN_DIR=%s -cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_VERSION)" -`, pgAdminDir) - - var stdin, stdout, stderr bytes.Buffer - - err := exec(ctx, &stdin, &stdout, &stderr, - []string{"bash", "-ceu", "--", script}...) 
- - if err != nil { - return "", err - } - - return strings.TrimSpace(stdout.String()), nil -} - // writePGAdminUsers takes the users in the pgAdmin spec and writes (adds or updates) their data // to both pgAdmin and the users.json file that is stored in the pgAdmin secret. If a user is // removed from the spec, its data is removed from users.json, but it is not deleted from pgAdmin. @@ -188,7 +179,7 @@ cd $PGADMIN_DIR } var olderThan9_3 bool - versionFloat, err := strconv.ParseFloat(pgadmin.Status.MinorVersion, 32) + versionFloat, err := strconv.ParseFloat(pgadmin.Status.MinorVersion, 64) if err != nil { return err } @@ -199,6 +190,8 @@ cd $PGADMIN_DIR intentUsers := []pgAdminUserForJson{} for _, user := range pgadmin.Spec.Users { var stdin, stdout, stderr bytes.Buffer + // starting in pgAdmin 9.3, custom roles are supported and a new flag is used + // - https://github.com/pgadmin-org/pgadmin4/pull/8631 typeFlag := "--role User" if olderThan9_3 { typeFlag = "--nonadmin" @@ -258,10 +251,13 @@ cd $PGADMIN_DIR log.Error(err, "PodExec failed: ") intentUsers = append(intentUsers, existingUser) continue + } else if strings.Contains(strings.TrimSpace(stderr.String()), "UserWarning: pkg_resources is deprecated as an API") { + // Started seeing this error with pgAdmin 9.7 when using Python 3.11. + // Issue appears to resolve with Python 3.13. log.Info(stderr.String()) } else if strings.TrimSpace(stderr.String()) != "" { - log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py update-user error for %s: ", intentUser.Username)) intentUsers = append(intentUsers, existingUser) continue @@ -295,9 +291,11 @@ cd $PGADMIN_DIR continue } if strings.Contains(strings.TrimSpace(stderr.String()), "UserWarning: pkg_resources is deprecated as an API") { + // Started seeing this error with pgAdmin 9.7 when using Python 3.11. + // Issue appears to resolve with Python 3.13. log.Info(stderr.String()) } else if strings.TrimSpace(stderr.String()) != "" { - log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py add-user error for %s: ", intentUser.Username)) continue } diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go index 3637e4993..47893a4fe 100644 --- a/internal/controller/standalone_pgadmin/users_test.go +++ b/internal/controller/standalone_pgadmin/users_test.go @@ -195,52 +195,40 @@ func TestReconcilePGAdminUsers(t *testing.T) { assert.Equal(t, pgadmin.Status.MinorVersion, "") assert.Equal(t, pgadmin.Status.ImageSHA, "") }) -} -func TestReconcilePGAdminVersion(t *testing.T) { - ctx := context.Background() - pod := corev1.Pod{} - pod.Namespace = "test-namespace" - pod.Name = "pgadmin-123-0" - reconciler := &PGAdminReconciler{} + t.Run("PodExecError", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pod := pod.DeepCopy() - podExecutor := func( - ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, - ) error { - return reconciler.PodExec(ctx, pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) 
- } + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "fakeSHA" - t.Run("SuccessfulRetrieval", func(t *testing.T) { - reconciler.PodExec = func( + r := new(PGAdminReconciler) + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { + calls++ + assert.Equal(t, pod, "pgadmin-123-0") - assert.Equal(t, namespace, "test-namespace") + assert.Equal(t, namespace, pgadmin.Namespace) assert.Equal(t, container, naming.ContainerPGAdmin) - // Simulate a v9.3 version of pgAdmin by setting stdout to "9.3" - // for podexec call in reconcilePGAdminVersion - _, _ = stdout.Write([]byte("9.3")) - return nil - } - - version, err := reconciler.reconcilePGAdminVersion(ctx, podExecutor) - assert.NilError(t, err) - assert.Equal(t, version, "9.3") - }) - - t.Run("PodExecError", func(t *testing.T) { - reconciler.PodExec = func( - ctx context.Context, namespace, pod, container string, - stdin io.Reader, stdout, stderr io.Writer, command ...string, - ) error { return errors.New("PodExecError") } - version, err := reconciler.reconcilePGAdminVersion(ctx, podExecutor) - assert.Check(t, err != nil) - assert.Equal(t, version, "") + assert.Error(t, r.reconcilePGAdminUsers(ctx, pgadmin), "PodExecError") + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 0) + assert.Equal(t, pgadmin.Status.MinorVersion, "") + assert.Equal(t, pgadmin.Status.ImageSHA, "") }) } From 04dc9a2bd39799c7d40f42211b648b227538efc6 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Mon, 29 Sep 2025 11:46:47 -0500 Subject: [PATCH 28/43] Check environment before starting Postgres major upgrade This leaves the disk untouched when the upgrade image cannot support the requested upgrade. Issue: PGO-300 See: 406e069c2e038befbcc122a912e692712839b22c --- internal/controller/pgupgrade/jobs.go | 8 +++++++- internal/controller/pgupgrade/jobs_test.go | 8 ++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index 4715c8da9..be39b4d59 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -49,7 +49,7 @@ func upgradeCommand(spec *v1beta1.PGUpgradeSettings, fetchKeyCommand string) []s newVersion := spec.ToPostgresVersion // if the fetch key command is set for TDE, provide the value during initialization - initdb := `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}"` + initdb := `/usr/pgsql-"${new_version}"/bin/initdb --allow-group-access -k -D /pgdata/pg"${new_version}"` if fetchKeyCommand != "" { initdb += ` --encryption-key-command "` + fetchKeyCommand + `"` } @@ -80,6 +80,12 @@ func upgradeCommand(spec *v1beta1.PGUpgradeSettings, fetchKeyCommand string) []s // Enable nss_wrapper so the current UID and GID resolve to "postgres". // - https://cwrap.org/nss_wrapper.html `export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD`, + `id; [[ "$(id -nu)" == 'postgres' && "$(id -ng)" == 'postgres' ]]`, + + // Expect Postgres executables at the Red Hat paths. 
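+		// (for example, /usr/pgsql-16/bin when upgrading to Postgres 16)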
+ `[[ -x /usr/pgsql-"${old_version}"/bin/postgres ]]`, + `[[ -x /usr/pgsql-"${new_version}"/bin/initdb ]]`, + `[[ -d /pgdata/pg"${old_version}" ]]`, // Below is the pg_upgrade script used to upgrade a PostgresCluster from // one major version to another. Additional information concerning the diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go index a94641d4c..5b1f6bc4f 100644 --- a/internal/controller/pgupgrade/jobs_test.go +++ b/internal/controller/pgupgrade/jobs_test.go @@ -208,11 +208,15 @@ spec: (sed "/^postgres:x:/ d; /^[^:]*:x:${uid}:/ d" /etc/passwd echo "postgres:x:${uid}:${gid%% *}::${data_volume}:") > "${NSS_WRAPPER_PASSWD}" export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD + id; [[ "$(id -nu)" == 'postgres' && "$(id -ng)" == 'postgres' ]] + [[ -x /usr/pgsql-"${old_version}"/bin/postgres ]] + [[ -x /usr/pgsql-"${new_version}"/bin/initdb ]] + [[ -d /pgdata/pg"${old_version}" ]] cd /pgdata || exit echo -e "Step 1: Making new pgdata directory...\n" mkdir /pgdata/pg"${new_version}" echo -e "Step 2: Initializing new pgdata directory...\n" - /usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}" + /usr/pgsql-"${new_version}"/bin/initdb --allow-group-access -k -D /pgdata/pg"${new_version}" echo -e "\nStep 3: Setting the expected permissions on the old pgdata directory...\n" chmod 750 /pgdata/pg"${old_version}" echo -e "Step 4: Copying shared_preload_libraries setting to new postgresql.conf file...\n" @@ -263,7 +267,7 @@ status: {} tdeJob := reconciler.generateUpgradeJob(ctx, upgrade, startup, "echo testKey") assert.Assert(t, cmp.MarshalContains(tdeJob, - `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}" --encryption-key-command "echo testKey"`)) + `/usr/pgsql-"${new_version}"/bin/initdb --allow-group-access -k -D /pgdata/pg"${new_version}" --encryption-key-command "echo testKey"`)) } func TestGenerateRemoveDataJob(t *testing.T) { From 0732b4825f3a0acdc7fbceff74891887dd40b49e Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Fri, 7 Mar 2025 12:26:09 -0600 Subject: [PATCH 29/43] Simplify upgrade scripts with more variables I find this Go code and resulting Bash easier to read. This also logs more about what is happening without changing the sequence of commands. The script included an unnecessary `|| exit`, so I moved the `set -eu` out of the Bash invocation into the script itself to make that behavior more obvious. 
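
For example, where the old script needed an explicit guard:

    cd /pgdata || exit

the new script sets the shell options once at the top and the bare
command fails the Job the same way:

    shopt -so errexit nounset
    ...
    cd "${data_volume}"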
--- internal/controller/pgupgrade/jobs.go | 149 ++++++++++----------- internal/controller/pgupgrade/jobs_test.go | 79 +++++------ 2 files changed, 115 insertions(+), 113 deletions(-) diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index be39b4d59..ac71e4a0a 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -21,11 +21,10 @@ import ( "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/shell" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// Upgrade job - // pgUpgradeJob returns the ObjectMeta for the pg_upgrade Job utilized to // upgrade from one major PostgreSQL version to another func pgUpgradeJob(upgrade *v1beta1.PGUpgrade) metav1.ObjectMeta { @@ -48,20 +47,24 @@ func upgradeCommand(spec *v1beta1.PGUpgradeSettings, fetchKeyCommand string) []s oldVersion := spec.FromPostgresVersion newVersion := spec.ToPostgresVersion - // if the fetch key command is set for TDE, provide the value during initialization - initdb := `/usr/pgsql-"${new_version}"/bin/initdb --allow-group-access -k -D /pgdata/pg"${new_version}"` + var argEncryptionKeyCommand string if fetchKeyCommand != "" { - initdb += ` --encryption-key-command "` + fetchKeyCommand + `"` + argEncryptionKeyCommand = ` --encryption-key-command=` + shell.QuoteWord(fetchKeyCommand) } args := []string{fmt.Sprint(oldVersion), fmt.Sprint(newVersion)} script := strings.Join([]string{ + // Exit immediately when a pipeline or subshell exits non-zero or when expanding an unset variable. + `shopt -so errexit nounset`, + `declare -r data_volume='/pgdata' old_version="$1" new_version="$2"`, - `printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n\n' "$@"`, + `printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n' "$@"`, + `section() { printf '\n\n%s\n' "$@"; }`, - // Note: Rather than import the nss_wrapper init container, as we do in - // the main postgres-operator, this job does the required nss_wrapper + // NOTE: Rather than import the nss_wrapper init container, as we do in + // the PostgresCluster controller, this job does the required nss_wrapper // settings here. + `section 'Step 1 of 7: Ensuring username is postgres...'`, // Create a copy of the system group definitions, but remove the "postgres" // group or any group with the current GID. Replace them with our own that @@ -82,57 +85,54 @@ func upgradeCommand(spec *v1beta1.PGUpgradeSettings, fetchKeyCommand string) []s `export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD`, `id; [[ "$(id -nu)" == 'postgres' && "$(id -ng)" == 'postgres' ]]`, + `section 'Step 2 of 7: Finding data and tools...'`, + // Expect Postgres executables at the Red Hat paths. - `[[ -x /usr/pgsql-"${old_version}"/bin/postgres ]]`, - `[[ -x /usr/pgsql-"${new_version}"/bin/initdb ]]`, - `[[ -d /pgdata/pg"${old_version}" ]]`, + `old_bin="/usr/pgsql-${old_version}/bin" && [[ -x "${old_bin}/postgres" ]]`, + `new_bin="/usr/pgsql-${new_version}/bin" && [[ -x "${new_bin}/initdb" ]]`, + `old_data="${data_volume}/pg${old_version}" && [[ -d "${old_data}" ]]`, + `new_data="${data_volume}/pg${new_version}"`, + + // pg_upgrade writes its files in "${new_data}/pg_upgrade_output.d" since PostgreSQL v15. 
+ // Change to a writable working directory to be compatible with PostgreSQL v14 and earlier. + // + // https://www.postgresql.org/docs/release/15#id-1.11.6.20.5.11.3 + `cd "${data_volume}"`, // Below is the pg_upgrade script used to upgrade a PostgresCluster from // one major version to another. Additional information concerning the // steps used and command flag specifics can be found in the documentation: // - https://www.postgresql.org/docs/current/pgupgrade.html - // To begin, we first move to the mounted /pgdata directory and create a - // new version directory which is then initialized with the initdb command. - `cd /pgdata || exit`, - `echo -e "Step 1: Making new pgdata directory...\n"`, - `mkdir /pgdata/pg"${new_version}"`, - `echo -e "Step 2: Initializing new pgdata directory...\n"`, - initdb, - - // Before running the upgrade check, which ensures the clusters are compatible, - // proper permissions have to be set on the old pgdata directory and the - // preload library settings must be copied over. - `echo -e "\nStep 3: Setting the expected permissions on the old pgdata directory...\n"`, - `chmod 750 /pgdata/pg"${old_version}"`, - `echo -e "Step 4: Copying shared_preload_libraries setting to new postgresql.conf file...\n"`, - `echo "shared_preload_libraries = '$(/usr/pgsql-"""${old_version}"""/bin/postgres -D \`, - `/pgdata/pg"""${old_version}""" -C shared_preload_libraries)'" >> /pgdata/pg"${new_version}"/postgresql.conf`, - - // Before the actual upgrade is run, we will run the upgrade --check to - // verify everything before actually changing any data. - `echo -e "Step 5: Running pg_upgrade check...\n"`, - `time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \`, - `--new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}"\`, - ` --new-datadir /pgdata/pg"${new_version}" --check` + argMethod + argJobs, - - // Assuming the check completes successfully, the pg_upgrade command will - // be run that actually prepares the upgraded pgdata directory. - `echo -e "\nStep 6: Running pg_upgrade...\n"`, - `time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \`, - `--new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}" \`, - `--new-datadir /pgdata/pg"${new_version}"` + argMethod + argJobs, - - // Since we have cleared the Patroni cluster step by removing the EndPoints, we copy patroni.dynamic.json - // from the old data dir to help retain PostgreSQL parameters you had set before. - // - https://patroni.readthedocs.io/en/latest/existing_data.html#major-upgrade-of-postgresql-version - `echo -e "\nStep 7: Copying patroni.dynamic.json...\n"`, - `cp /pgdata/pg"${old_version}"/patroni.dynamic.json /pgdata/pg"${new_version}"`, - - `echo -e "\npg_upgrade Job Complete!"`, + `section 'Step 3 of 7: Initializing new data directory...'`, + `PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access --data-checksums` + argEncryptionKeyCommand, + + // Read the configured value then quote it; every single-quote U+0027 is replaced by two. 
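+ // For example, a hypothetical value lib'A is appended as 'lib''A', which Postgres reads back as lib'A.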
+ // + // https://www.postgresql.org/docs/current/config-setting.html + // https://www.gnu.org/software/bash/manual/bash.html#ANSI_002dC-Quoting + `section 'Step 4 of 7: Copying shared_preload_libraries parameter...'`, + `value=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/postgres" -C shared_preload_libraries)`, + `echo >> "${new_data}/postgresql.conf" "shared_preload_libraries = '${value//$'\''/$'\'\''}'"`, + + `section 'Step 5 of 7: Checking for potential issues...'`, + `"${new_bin}/pg_upgrade" --check` + argMethod + argJobs + ` \`, + `--old-bindir="${old_bin}" --old-datadir="${old_data}" \`, + `--new-bindir="${new_bin}" --new-datadir="${new_data}"`, + + `section 'Step 6 of 7: Performing upgrade...'`, + `(set -x && time "${new_bin}/pg_upgrade"` + argMethod + argJobs + ` \`, + `--old-bindir="${old_bin}" --old-datadir="${old_data}" \`, + `--new-bindir="${new_bin}" --new-datadir="${new_data}")`, + + // https://patroni.readthedocs.io/en/latest/existing_data.html#major-upgrade-of-postgresql-version + `section 'Step 7 of 7: Copying Patroni settings...'`, + `(set -x && cp "${old_data}/patroni.dynamic.json" "${new_data}")`, + + `section 'Success!'`, }, "\n") - return append([]string{"bash", "-ceu", "--", script, "upgrade"}, args...) + return append([]string{"bash", "-c", "--", script, "upgrade"}, args...) } // largestWholeCPU returns the maximum CPU request or limit as a non-negative @@ -238,38 +238,37 @@ func (r *PGUpgradeReconciler) generateUpgradeJob( // We currently target the `pgdata/pg{old_version}` and `pgdata/pg{old_version}_wal` // directories for removal. func removeDataCommand(upgrade *v1beta1.PGUpgrade) []string { - oldVersion := fmt.Sprint(upgrade.Spec.FromPostgresVersion) + oldVersion := upgrade.Spec.FromPostgresVersion // Before removing the directories (both data and wal), we check that // the directory is not in use by running `pg_controldata` and making sure // the server state is "shut down in recovery" - // TODO(benjaminjb): pg_controldata seems pretty stable, but might want to - // experiment with a few more versions. - args := []string{oldVersion} + args := []string{fmt.Sprint(oldVersion)} script := strings.Join([]string{ - `declare -r old_version="$1"`, - `printf 'Removing PostgreSQL data dir for pg%s...\n\n' "$@"`, - `echo -e "Checking the directory exists and isn't being used...\n"`, - `cd /pgdata || exit`, - // The string `shut down in recovery` is the dbstate that postgres sets from - // at least version 10 to 14 when a replica has been shut down. - // - https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/bin/pg_controldata/pg_controldata.c;h=f911f98d946d83f1191abf35239d9b4455c5f52a;hb=HEAD#l59 - // Note: `pg_controldata` is actually used by `pg_upgrade` before upgrading - // to make sure that the server in question is shut down as a primary; - // that aligns with our use here, where we're making sure that the server in question - // was shut down as a replica. - // - https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/bin/pg_upgrade/controldata.c;h=41b8f69b8cbe4f40e6098ad84c2e8e987e24edaf;hb=HEAD#l122 - `if [ "$(/usr/pgsql-"${old_version}"/bin/pg_controldata /pgdata/pg"${old_version}" | grep -c "shut down in recovery")" -ne 1 ]; then echo -e "Directory in use, cannot remove..."; exit 1; fi`, - `echo -e "Removing old pgdata directory...\n"`, - // When deleting the wal directory, use `realpath` to resolve the symlink from - // the pgdata directory. 
This is necessary because the wal directory can be - // mounted at different places depending on if an external wal PVC is used, - // i.e. `/pgdata/pg14_wal` vs `/pgwal/pg14_wal` - `rm -rf /pgdata/pg"${old_version}" "$(realpath /pgdata/pg${old_version}/pg_wal)"`, - `echo -e "Remove Data Job Complete!"`, + // Exit immediately when a pipeline or subshell exits non-zero or when expanding an unset variable. + `shopt -so errexit nounset`, + + `declare -r data_volume='/pgdata' old_version="$1"`, + `printf 'Removing PostgreSQL %s data...\n\n' "$@"`, + `delete() (set -x && rm -rf -- "$@")`, + + `old_data="${data_volume}/pg${old_version}"`, + `control=$(LC_ALL=C /usr/pgsql-${old_version}/bin/pg_controldata "${old_data}")`, + `read -r state <<< "${control##*cluster state:}"`, + + // We expect exactly one state for a replica that has been stopped. + // + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_10_0;f=src/bin/pg_controldata/pg_controldata.c#l55 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_17_0;f=src/bin/pg_controldata/pg_controldata.c#l58 + `[[ "${state}" == 'shut down in recovery' ]] || { printf >&2 'Unexpected state! %q\n' "${state}"; exit 1; }`, + + // "rm" does not follow symbolic links. + // Delete the old data directory after subdirectories that contain versioned data. + `delete "${old_data}/pg_wal/"`, + `delete "${old_data}" && echo 'Success!'`, }, "\n") - return append([]string{"bash", "-ceu", "--", script, "remove"}, args...) + return append([]string{"bash", "-c", "--", script, "remove"}, args...) } // generateRemoveDataJob returns a Job that can remove the data diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go index 5b1f6bc4f..7ce22be37 100644 --- a/internal/controller/pgupgrade/jobs_test.go +++ b/internal/controller/pgupgrade/jobs_test.go @@ -87,7 +87,7 @@ func TestUpgradeCommand(t *testing.T) { spec := &v1beta1.PGUpgradeSettings{Jobs: tt.Spec} command := upgradeCommand(spec, "") assert.Assert(t, len(command) > 3) - assert.DeepEqual(t, []string{"bash", "-ceu", "--"}, command[:3]) + assert.DeepEqual(t, []string{"bash", "-c", "--"}, command[:3]) script := command[3] assert.Assert(t, cmp.Contains(script, tt.Args)) @@ -111,7 +111,7 @@ func TestUpgradeCommand(t *testing.T) { spec := &v1beta1.PGUpgradeSettings{TransferMethod: tt.Spec} command := upgradeCommand(spec, "") assert.Assert(t, len(command) > 3) - assert.DeepEqual(t, []string{"bash", "-ceu", "--"}, command[:3]) + assert.DeepEqual(t, []string{"bash", "-c", "--"}, command[:3]) script := command[3] assert.Assert(t, cmp.Contains(script, tt.Args)) @@ -196,11 +196,14 @@ spec: containers: - command: - bash - - -ceu + - -c - -- - |- + shopt -so errexit nounset declare -r data_volume='/pgdata' old_version="$1" new_version="$2" - printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n\n' "$@" + printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n' "$@" + section() { printf '\n\n%s\n' "$@"; } + section 'Step 1 of 7: Ensuring username is postgres...' 
gid=$(id -G); NSS_WRAPPER_GROUP=$(mktemp) (sed "/^postgres:x:/ d; /^[^:]*:x:${gid%% *}:/ d" /etc/group echo "postgres:x:${gid%% *}:") > "${NSS_WRAPPER_GROUP}" @@ -209,30 +212,28 @@ spec: echo "postgres:x:${uid}:${gid%% *}::${data_volume}:") > "${NSS_WRAPPER_PASSWD}" export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD id; [[ "$(id -nu)" == 'postgres' && "$(id -ng)" == 'postgres' ]] - [[ -x /usr/pgsql-"${old_version}"/bin/postgres ]] - [[ -x /usr/pgsql-"${new_version}"/bin/initdb ]] - [[ -d /pgdata/pg"${old_version}" ]] - cd /pgdata || exit - echo -e "Step 1: Making new pgdata directory...\n" - mkdir /pgdata/pg"${new_version}" - echo -e "Step 2: Initializing new pgdata directory...\n" - /usr/pgsql-"${new_version}"/bin/initdb --allow-group-access -k -D /pgdata/pg"${new_version}" - echo -e "\nStep 3: Setting the expected permissions on the old pgdata directory...\n" - chmod 750 /pgdata/pg"${old_version}" - echo -e "Step 4: Copying shared_preload_libraries setting to new postgresql.conf file...\n" - echo "shared_preload_libraries = '$(/usr/pgsql-"""${old_version}"""/bin/postgres -D \ - /pgdata/pg"""${old_version}""" -C shared_preload_libraries)'" >> /pgdata/pg"${new_version}"/postgresql.conf - echo -e "Step 5: Running pg_upgrade check...\n" - time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \ - --new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}"\ - --new-datadir /pgdata/pg"${new_version}" --check --link --jobs=1 - echo -e "\nStep 6: Running pg_upgrade...\n" - time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \ - --new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}" \ - --new-datadir /pgdata/pg"${new_version}" --link --jobs=1 - echo -e "\nStep 7: Copying patroni.dynamic.json...\n" - cp /pgdata/pg"${old_version}"/patroni.dynamic.json /pgdata/pg"${new_version}" - echo -e "\npg_upgrade Job Complete!" + section 'Step 2 of 7: Finding data and tools...' + old_bin="/usr/pgsql-${old_version}/bin" && [[ -x "${old_bin}/postgres" ]] + new_bin="/usr/pgsql-${new_version}/bin" && [[ -x "${new_bin}/initdb" ]] + old_data="${data_volume}/pg${old_version}" && [[ -d "${old_data}" ]] + new_data="${data_volume}/pg${new_version}" + cd "${data_volume}" + section 'Step 3 of 7: Initializing new data directory...' + PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access --data-checksums + section 'Step 4 of 7: Copying shared_preload_libraries parameter...' + value=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/postgres" -C shared_preload_libraries) + echo >> "${new_data}/postgresql.conf" "shared_preload_libraries = '${value//$'\''/$'\'\''}'" + section 'Step 5 of 7: Checking for potential issues...' + "${new_bin}/pg_upgrade" --check --link --jobs=1 \ + --old-bindir="${old_bin}" --old-datadir="${old_data}" \ + --new-bindir="${new_bin}" --new-datadir="${new_data}" + section 'Step 6 of 7: Performing upgrade...' + (set -x && time "${new_bin}/pg_upgrade" --link --jobs=1 \ + --old-bindir="${old_bin}" --old-datadir="${old_data}" \ + --new-bindir="${new_bin}" --new-datadir="${new_data}") + section 'Step 7 of 7: Copying Patroni settings...' + (set -x && cp "${old_data}/patroni.dynamic.json" "${new_data}") + section 'Success!' 
- upgrade - "19" - "25" @@ -267,7 +268,7 @@ status: {} tdeJob := reconciler.generateUpgradeJob(ctx, upgrade, startup, "echo testKey") assert.Assert(t, cmp.MarshalContains(tdeJob, - `/usr/pgsql-"${new_version}"/bin/initdb --allow-group-access -k -D /pgdata/pg"${new_version}" --encryption-key-command "echo testKey"`)) + `PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access --data-checksums --encryption-key-command='echo testKey'`)) } func TestGenerateRemoveDataJob(t *testing.T) { @@ -343,17 +344,19 @@ spec: containers: - command: - bash - - -ceu + - -c - -- - |- - declare -r old_version="$1" - printf 'Removing PostgreSQL data dir for pg%s...\n\n' "$@" - echo -e "Checking the directory exists and isn't being used...\n" - cd /pgdata || exit - if [ "$(/usr/pgsql-"${old_version}"/bin/pg_controldata /pgdata/pg"${old_version}" | grep -c "shut down in recovery")" -ne 1 ]; then echo -e "Directory in use, cannot remove..."; exit 1; fi - echo -e "Removing old pgdata directory...\n" - rm -rf /pgdata/pg"${old_version}" "$(realpath /pgdata/pg${old_version}/pg_wal)" - echo -e "Remove Data Job Complete!" + shopt -so errexit nounset + declare -r data_volume='/pgdata' old_version="$1" + printf 'Removing PostgreSQL %s data...\n\n' "$@" + delete() (set -x && rm -rf -- "$@") + old_data="${data_volume}/pg${old_version}" + control=$(LC_ALL=C /usr/pgsql-${old_version}/bin/pg_controldata "${old_data}") + read -r state <<< "${control##*cluster state:}" + [[ "${state}" == 'shut down in recovery' ]] || { printf >&2 'Unexpected state! %q\n' "${state}"; exit 1; } + delete "${old_data}/pg_wal/" + delete "${old_data}" && echo 'Success!' - remove - "19" image: img4 From b1b6652455fa04ef8aea0c21b75b230f62577e26 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Fri, 26 Sep 2025 16:08:14 -0500 Subject: [PATCH 30/43] Look in more directories for upgrade binaries This makes major upgrades compatible with images from other distros. Issue: PGO-864 --- internal/controller/pgupgrade/jobs.go | 26 ++++++++++++++---- internal/controller/pgupgrade/jobs_test.go | 10 ++++--- internal/postgres/config.go | 11 ++++++++ internal/postgres/config_test.go | 31 ++++++++++++++++++++++ 4 files changed, 70 insertions(+), 8 deletions(-) diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index ac71e4a0a..9dbf76ea5 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -21,6 +21,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/internal/shell" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -86,13 +87,25 @@ func upgradeCommand(spec *v1beta1.PGUpgradeSettings, fetchKeyCommand string) []s `id; [[ "$(id -nu)" == 'postgres' && "$(id -ng)" == 'postgres' ]]`, `section 'Step 2 of 7: Finding data and tools...'`, - - // Expect Postgres executables at the Red Hat paths. 
- `old_bin="/usr/pgsql-${old_version}/bin" && [[ -x "${old_bin}/postgres" ]]`, - `new_bin="/usr/pgsql-${new_version}/bin" && [[ -x "${new_bin}/initdb" ]]`, `old_data="${data_volume}/pg${old_version}" && [[ -d "${old_data}" ]]`, `new_data="${data_volume}/pg${new_version}"`, + // Search for Postgres executables matching the old and new versions. + // Use `command -v` to look through all of PATH, then trim the executable name from the absolute path. + `old_bin=$(` + postgres.ShellPath(oldVersion) + ` && command -v postgres)`, + `old_bin="${old_bin%/postgres}"`, + `new_bin=$(` + postgres.ShellPath(newVersion) + ` && command -v pg_upgrade)`, + `new_bin="${new_bin%/pg_upgrade}"`, + + // The executables found might not be the versions we need, so do a cursory check before writing to disk. + // pg_upgrade checks every executable thoroughly since PostgreSQL v14. + // + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_10_0;f=src/bin/pg_upgrade/exec.c#l355 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_14_0;f=src/bin/pg_upgrade/exec.c#l358 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_18_0;f=src/bin/pg_upgrade/exec.c#l370 + `(set -x && [[ "$("${old_bin}/postgres" --version)" =~ ") ${old_version}"($|[^0-9]) ]])`, + `(set -x && [[ "$("${new_bin}/initdb" --version)" =~ ") ${new_version}"($|[^0-9]) ]])`, + // pg_upgrade writes its files in "${new_data}/pg_upgrade_output.d" since PostgreSQL v15. // Change to a writable working directory to be compatible with PostgreSQL v14 and earlier. // @@ -115,6 +128,9 @@ func upgradeCommand(spec *v1beta1.PGUpgradeSettings, fetchKeyCommand string) []s `value=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/postgres" -C shared_preload_libraries)`, `echo >> "${new_data}/postgresql.conf" "shared_preload_libraries = '${value//$'\''/$'\'\''}'"`, + // NOTE: The default for --new-bindir is the directory of pg_upgrade since PostgreSQL v13. + // + // https://www.postgresql.org/docs/release/13#id-1.11.6.28.5.11 `section 'Step 5 of 7: Checking for potential issues...'`, `"${new_bin}/pg_upgrade" --check` + argMethod + argJobs + ` \`, `--old-bindir="${old_bin}" --old-datadir="${old_data}" \`, @@ -253,7 +269,7 @@ func removeDataCommand(upgrade *v1beta1.PGUpgrade) []string { `delete() (set -x && rm -rf -- "$@")`, `old_data="${data_volume}/pg${old_version}"`, - `control=$(LC_ALL=C /usr/pgsql-${old_version}/bin/pg_controldata "${old_data}")`, + `control=$(` + postgres.ShellPath(oldVersion) + ` && LC_ALL=C pg_controldata "${old_data}")`, `read -r state <<< "${control##*cluster state:}"`, // We expect exactly one state for a replica that has been stopped. diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go index 7ce22be37..9c9e00063 100644 --- a/internal/controller/pgupgrade/jobs_test.go +++ b/internal/controller/pgupgrade/jobs_test.go @@ -213,10 +213,14 @@ spec: export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD id; [[ "$(id -nu)" == 'postgres' && "$(id -ng)" == 'postgres' ]] section 'Step 2 of 7: Finding data and tools...' 
- old_bin="/usr/pgsql-${old_version}/bin" && [[ -x "${old_bin}/postgres" ]] - new_bin="/usr/pgsql-${new_version}/bin" && [[ -x "${new_bin}/initdb" ]] old_data="${data_volume}/pg${old_version}" && [[ -d "${old_data}" ]] new_data="${data_volume}/pg${new_version}" + old_bin=$(PATH="/usr/lib/postgresql/19/bin:/usr/libexec/postgresql19:/usr/pgsql-19/bin${PATH+:${PATH}}" && command -v postgres) + old_bin="${old_bin%/postgres}" + new_bin=$(PATH="/usr/lib/postgresql/25/bin:/usr/libexec/postgresql25:/usr/pgsql-25/bin${PATH+:${PATH}}" && command -v pg_upgrade) + new_bin="${new_bin%/pg_upgrade}" + (set -x && [[ "$("${old_bin}/postgres" --version)" =~ ") ${old_version}"($|[^0-9]) ]]) + (set -x && [[ "$("${new_bin}/initdb" --version)" =~ ") ${new_version}"($|[^0-9]) ]]) cd "${data_volume}" section 'Step 3 of 7: Initializing new data directory...' PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access --data-checksums @@ -352,7 +356,7 @@ spec: printf 'Removing PostgreSQL %s data...\n\n' "$@" delete() (set -x && rm -rf -- "$@") old_data="${data_volume}/pg${old_version}" - control=$(LC_ALL=C /usr/pgsql-${old_version}/bin/pg_controldata "${old_data}") + control=$(PATH="/usr/lib/postgresql/19/bin:/usr/libexec/postgresql19:/usr/pgsql-19/bin${PATH+:${PATH}}" && LC_ALL=C pg_controldata "${old_data}") read -r state <<< "${control##*cluster state:}" [[ "${state}" == 'shut down in recovery' ]] || { printf >&2 'Unexpected state! %q\n' "${state}"; exit 1; } delete "${old_data}/pg_wal/" diff --git a/internal/postgres/config.go b/internal/postgres/config.go index 75371e6af..ebefc9dd6 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -264,6 +264,17 @@ func Environment(cluster *v1beta1.PostgresCluster) []corev1.EnvVar { } } +// ShellPath returns a POSIX shell command that prepends typical Postgres executable paths to the PATH variable. +func ShellPath(postgresVersion int32) string { + return fmt.Sprintf(`PATH="`+ + strings.Join([]string{ + `/usr/lib/postgresql/%[1]d/bin`, // Debian + `/usr/libexec/postgresql%[1]d`, // Alpine + `/usr/pgsql-%[1]d/bin`, // Red Hat + }, ":")+ + `${PATH+:${PATH}}"`, postgresVersion) +} + // reloadCommand returns an entrypoint that convinces PostgreSQL to reload // certificate files when they change. The process will appear as name in `ps` // and `top`. diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index c0960ac27..ffd227f4b 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -547,6 +547,37 @@ func TestBashSafeLink(t *testing.T) { }) } +func TestShellPath(t *testing.T) { + t.Parallel() + + script := ShellPath(11) + + assert.Assert(t, cmp.Contains(script, `/usr/lib/postgresql/11/bin`)) + assert.Assert(t, cmp.Contains(script, `/usr/libexec/postgresql11`)) + assert.Assert(t, cmp.Contains(script, `/usr/pgsql-11/bin`)) + + t.Run("ShellCheckPOSIX", func(t *testing.T) { + shellcheck := require.ShellCheck(t) + + dir := t.TempDir() + file := filepath.Join(dir, "script.sh") + assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) + + // Expect ShellCheck for "sh" to be happy. 
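// ShellPath emits only POSIX parameter expansion, so the stricter "sh" dialect should accept it.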
+ // - https://www.shellcheck.net/wiki/SC2148 + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", "--shell=sh", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) + }) + + t.Run("PrettyYAML", func(t *testing.T) { + b, err := yaml.Marshal(script) + assert.NilError(t, err) + assert.Assert(t, !strings.Contains(string(b), `\n`), "expected literal flow scalar, got:\n%s", b) + assert.Equal(t, 1, strings.Count(string(b), "\n"), "expected one trailing newline, got:\n%s", b) + }) +} + func TestStartupCommand(t *testing.T) { shellcheck := require.ShellCheck(t) t.Parallel() From 380eb6c5cd0ee61f9c7015467c7d222f7b4a2723 Mon Sep 17 00:00:00 2001 From: Benjamin Blattberg Date: Tue, 30 Sep 2025 14:29:19 -0500 Subject: [PATCH 31/43] Github operator build update (#4305) Update GitHub builds * Adjust chmod for licenses, queries * Adjust license aggregation Issues: [PGO-2695] Co-authored-by: Chris Bandy --- .dockerignore | 1 + Dockerfile | 7 +- hack/extract-licenses.go | 236 +++++++++++++++++++++++++++++++++++++++ licenses/.gitignore | 1 - 4 files changed, 242 insertions(+), 3 deletions(-) create mode 100644 hack/extract-licenses.go delete mode 100644 licenses/.gitignore diff --git a/.dockerignore b/.dockerignore index 6ff2842b8..32fab58f6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,4 +3,5 @@ /.git /bin /hack +!/hack/extract-licenses.go !/hack/tools/queries diff --git a/Dockerfile b/Dockerfile index a218dfe49..6fed212c2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,12 +10,15 @@ COPY hack/tools/queries /opt/crunchy/conf WORKDIR /usr/src/app COPY . . ENV GOCACHE=/var/cache/go + +# Build the operator and assemble the licenses RUN --mount=type=cache,target=/var/cache/go go build ./cmd/postgres-operator +RUN go run ./hack/extract-licenses.go licenses postgres-operator FROM docker.io/library/debian:bookworm -COPY --from=build /licenses /licenses -COPY --from=build /opt/crunchy/conf /opt/crunchy/conf +COPY --from=build --chmod=0444 /usr/src/app/licenses /licenses +COPY --from=build --chmod=0444 /opt/crunchy/conf /opt/crunchy/conf COPY --from=build /usr/src/app/postgres-operator /usr/local/bin USER 2 diff --git a/hack/extract-licenses.go b/hack/extract-licenses.go new file mode 100644 index 000000000..5bc0ab923 --- /dev/null +++ b/hack/extract-licenses.go @@ -0,0 +1,236 @@ +// Copyright 2024 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "bytes" + "context" + "encoding/csv" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "io/fs" + "os" + "os/exec" + "os/signal" + "path/filepath" + "slices" + "strings" + "syscall" +) + +func main() { + flags := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + flags.Usage = func() { + fmt.Fprintln(flags.Output(), strings.TrimSpace(` +Usage: `+flags.Name()+` {directory} {executables...} + +This program downloads and extracts the licenses of Go modules used to build +Go executables. + +The first argument is a directory that will receive license files. It will be +created if it does not exist. This program will overwrite existing files but +not delete them. Remaining arguments must be Go executables. 
+ +Go modules are downloaded to the Go module cache which can be changed via +the environment: https://go.dev/ref/mod#module-cache`, )) } if _ = flags.Parse(os.Args[1:]); flags.NArg() < 2 || slices.ContainsFunc( os.Args, func(arg string) bool { return arg == "-help" || arg == "--help" }, ) { flags.Usage() os.Exit(2) } ctx, cancel := context.WithCancel(context.Background()) signals := make(chan os.Signal, 1) signal.Notify(signals, os.Interrupt, syscall.SIGTERM) go func() { <-signals; cancel() }() // Create the target directory. if err := os.MkdirAll(flags.Arg(0), 0o755); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } // Extract module information from remaining arguments. modules := identifyModules(ctx, flags.Args()[1:]...) // Ignore packages from Crunchy Data. Most are not available in any [proxy], // and we handle their licenses elsewhere. // // This is also a quick fix to avoid the [replace] directive in our projects. // The logic below cannot handle them. Showing xxhash versus a replace: // // dep github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= // dep github.com/crunchydata/postgres-operator v0.0.0-00010101000000-000000000000 // => ./postgres-operator (devel) // // [proxy]: https://go.dev/ref/mod#module-proxy // [replace]: https://go.dev/ref/mod#go-mod-file-replace modules = slices.DeleteFunc(modules, func(s string) bool { return strings.HasPrefix(s, "github.com/crunchydata/") }) // Download modules to the Go module cache. directories := downloadModules(ctx, modules...) // Gather license files from every module into the target directory. for module, directory := range directories { for _, license := range findLicenses(directory) { relative := module + strings.TrimPrefix(license, directory) destination := filepath.Join(flags.Arg(0), relative) var data []byte err := ctx.Err() if err == nil { err = os.MkdirAll(filepath.Dir(destination), 0o755) } if err == nil { data, err = os.ReadFile(license) } if err == nil { // When we copy the licenses in the Dockerfiles, make sure // to `--chmod` them to appropriate permissions, e.g., 0o444 err = os.WriteFile(destination, data, 0o600) } if err == nil { fmt.Fprintln(os.Stdout, license, "=>", destination) } if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } } } } func downloadModules(ctx context.Context, modules ...string) map[string]string { var stdout bytes.Buffer // Download modules and read their details into a series of JSON objects. // - https://go.dev/ref/mod#go-mod-download //gosec:disable G204 -- Use this environment variable to switch Go versions without touching PATH cmd := exec.CommandContext(ctx, os.Getenv("GO"), append([]string{"mod", "download", "-json"}, modules...)...) if cmd.Path == "" { cmd.Path, cmd.Err = exec.LookPath("go") } cmd.Stderr = os.Stderr cmd.Stdout = &stdout if err := cmd.Run(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(cmd.ProcessState.ExitCode()) } decoder := json.NewDecoder(&stdout) results := make(map[string]string, len(modules)) // NOTE: The directory in the cache is a normalized spelling of the module path; // ask Go for the directory; do not try to spell it yourself.
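// For example, uppercase letters in a module path are case-encoded on disk: "github.com/Azure" is cached under "github.com/!azure".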
+ // - https://go.dev/ref/mod#module-cache + // - https://go.dev/ref/mod#module-path + for { + var module struct { + Path string `json:"path,omitempty"` + Version string `json:"version,omitempty"` + Dir string `json:"dir,omitempty"` + } + err := decoder.Decode(&module) + + if err == nil { + results[module.Path+"@"+module.Version] = module.Dir + continue + } + if errors.Is(err, io.EOF) { + break + } + + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + return results +} + +func findLicenses(directory string) []string { + var results []string + + // Syft maintains a list of license filenames that began as a list maintained by + // Go. We gather a similar list by matching on "copying" and "license" filenames. + // - https://pkg.go.dev/github.com/anchore/syft@v1.3.0/internal/licenses#FileNames + // + // Ignore Go files and anything in the special "testdata" directory. + // - https://go.dev/cmd/go + err := filepath.WalkDir(directory, func(path string, d fs.DirEntry, err error) error { + if d.IsDir() && d.Name() == "testdata" { + return fs.SkipDir + } + if d.IsDir() || strings.HasSuffix(path, ".go") { + return err + } + + lower := strings.ToLower(d.Name()) + if strings.Contains(lower, "copying") || strings.Contains(lower, "license") { + results = append(results, path) + } + + return err + }) + + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + return results +} + +func identifyModules(ctx context.Context, executables ...string) []string { + var stdout bytes.Buffer + + // Use `go version -m` to read the embedded module information as a text table. + // - https://go.dev/ref/mod#go-version-m + //gosec:disable G204 -- Use this environment variable to switch Go versions without touching PATH + cmd := exec.CommandContext(ctx, os.Getenv("GO"), append([]string{"version", "-m"}, executables...)...) + if cmd.Path == "" { + cmd.Path, cmd.Err = exec.LookPath("go") + } + cmd.Stderr = os.Stderr + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(cmd.ProcessState.ExitCode()) + } + + // Parse the tab-separated table without checking row lengths. + reader := csv.NewReader(&stdout) + reader.Comma = '\t' + reader.FieldsPerRecord = -1 + + lines, _ := reader.ReadAll() + result := make([]string, 0, len(lines)) + + for _, fields := range lines { + if len(fields) > 3 && fields[1] == "dep" { + result = append(result, fields[2]+"@"+fields[3]) + } + if len(fields) > 4 && fields[1] == "mod" && fields[4] != "" { + result = append(result, fields[2]+"@"+fields[3]) + } + } + + // The `go version -m` command returns no information for empty files, and it + // is possible for a Go executable to have no main module and no dependencies. + if len(result) == 0 { + fmt.Fprintf(os.Stderr, "no Go modules in %v\n", executables) + os.Exit(0) + } + + return result +} diff --git a/licenses/.gitignore b/licenses/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/licenses/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* From 1c5291891e0119610e0ea528cfbee154d49024fb Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Mon, 22 Sep 2025 16:33:57 -0500 Subject: [PATCH 32/43] Proceed with major upgrade when data directory lacks checksums This can happen when a data directory is imported from an external system. 
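For illustration, the upgrade job decides by reading a single field of pg_controldata output (typical output shown; whitespace varies by version):

    $ LC_ALL=C pg_controldata "$PGDATA" | grep 'page checksum'
    Data page checksum version:           0

Zero means data checksums are disabled; any other value means they are enabled.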
Issue: PGO-619 --- internal/controller/pgupgrade/jobs.go | 22 +++++++++++++++++++++- internal/controller/pgupgrade/jobs_test.go | 7 +++++-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index 9dbf76ea5..6f6379b8e 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -117,8 +117,28 @@ func upgradeCommand(spec *v1beta1.PGUpgradeSettings, fetchKeyCommand string) []s // steps used and command flag specifics can be found in the documentation: // - https://www.postgresql.org/docs/current/pgupgrade.html + // Examine the old data directory. + `control=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/pg_controldata")`, + `read -r checksums <<< "${control##*page checksum version:}"`, + + // Data checksums on the old and new data directories must match. + // Configuring these checksums depends on the version of initdb: + // + // - PostgreSQL v17 and earlier: disabled by default, enable with "--data-checksums" + // - PostgreSQL v18: enabled by default, enable with "--data-checksums", disable with "--no-data-checksums" + // + // https://www.postgresql.org/docs/release/18#RELEASE-18-MIGRATION + // + // Data page checksum version zero means checksums are disabled. + // Produce an initdb argument that enables or disables data checksums. + // + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_11_0;f=src/bin/pg_verify_checksums/pg_verify_checksums.c#l303 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_12_0;f=src/bin/pg_checksums/pg_checksums.c#l523 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_18_0;f=src/bin/pg_checksums/pg_checksums.c#l571 + `checksums=$(if [[ "${checksums}" -gt 0 ]]; then echo '--data-checksums'; elif [[ "${new_version}" -ge 18 ]]; then echo '--no-data-checksums'; fi)`, + `section 'Step 3 of 7: Initializing new data directory...'`, - `PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access --data-checksums` + argEncryptionKeyCommand, + `PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access ${checksums}` + argEncryptionKeyCommand, // Read the configured value then quote it; every single-quote U+0027 is replaced by two. // diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go index 9c9e00063..cd96a4a3e 100644 --- a/internal/controller/pgupgrade/jobs_test.go +++ b/internal/controller/pgupgrade/jobs_test.go @@ -222,8 +222,11 @@ spec: (set -x && [[ "$("${old_bin}/postgres" --version)" =~ ") ${old_version}"($|[^0-9]) ]]) (set -x && [[ "$("${new_bin}/initdb" --version)" =~ ") ${new_version}"($|[^0-9]) ]]) cd "${data_volume}" + control=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/pg_controldata") + read -r checksums <<< "${control##*page checksum version:}" + checksums=$(if [[ "${checksums}" -gt 0 ]]; then echo '--data-checksums'; elif [[ "${new_version}" -ge 18 ]]; then echo '--no-data-checksums'; fi) section 'Step 3 of 7: Initializing new data directory...' - PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access --data-checksums + PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access ${checksums} section 'Step 4 of 7: Copying shared_preload_libraries parameter...' 
value=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/postgres" -C shared_preload_libraries) echo >> "${new_data}/postgresql.conf" "shared_preload_libraries = '${value//$'\''/$'\'\''}'" @@ -272,7 +275,7 @@ status: {} tdeJob := reconciler.generateUpgradeJob(ctx, upgrade, startup, "echo testKey") assert.Assert(t, cmp.MarshalContains(tdeJob, - `PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access --data-checksums --encryption-key-command='echo testKey'`)) + `PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access ${checksums} --encryption-key-command='echo testKey'`)) } func TestGenerateRemoveDataJob(t *testing.T) { From b9fb1d8e0b619d414c7e71e3635843fdc849bc41 Mon Sep 17 00:00:00 2001 From: Benjamin Blattberg Date: Wed, 1 Oct 2025 13:23:30 -0500 Subject: [PATCH 33/43] Allow quotes in license extraction (#4309) Some non-Crunchy Go binaries may have quotes in the build data, e.g., `ldflag` settings. Setting `LazyQuotes` means we won't error on those quotes, but continue with parsing the dep and build fields. --- hack/extract-licenses.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hack/extract-licenses.go b/hack/extract-licenses.go index 5bc0ab923..3f75e2fc2 100644 --- a/hack/extract-licenses.go +++ b/hack/extract-licenses.go @@ -208,10 +208,12 @@ func identifyModules(ctx context.Context, executables ...string) []string { os.Exit(cmd.ProcessState.ExitCode()) } - // Parse the tab-separated table without checking row lengths. + // Parse the tab-separated table without checking row lengths + // and without enforcing strict quote mark rules. reader := csv.NewReader(&stdout) reader.Comma = '\t' reader.FieldsPerRecord = -1 + reader.LazyQuotes = true lines, _ := reader.ReadAll() result := make([]string, 0, len(lines)) From ca7c3cd88f4bd7673edd785b949b455b131f2b1c Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Mon, 8 Sep 2025 10:45:19 -0500 Subject: [PATCH 34/43] Add documentation about CRD versions and validation --- .../README.md | 121 ++++++++++++++++++ .../validation.md | 97 ++++++++++++-- 2 files changed, 210 insertions(+), 8 deletions(-) create mode 100644 pkg/apis/postgres-operator.crunchydata.com/README.md diff --git a/pkg/apis/postgres-operator.crunchydata.com/README.md b/pkg/apis/postgres-operator.crunchydata.com/README.md new file mode 100644 index 000000000..ef314de19 --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/README.md @@ -0,0 +1,121 @@ + + +# Custom Resource Definitions + +These directories contain Go types that serve as [DTO]s for communicating with the [Kubernetes API]. +We use [controller-gen] to produce [CRD]s based on these Go types with [schemas](validation.md) that match. + +This [directory](.) contains our API Group, `postgres-operator.crunchydata.com`, and each subdirectory is a version: + +- v1beta1 is compatible with Kubernetes 1.30, OpenShift 4.14, and later +- v1 uses newer CRD features and requires Kubernetes 1.30, OpenShift 4.17, and later + +``` +pkg/apis/postgres-operator.crunchydata.com +├── v1 +└── v1beta1 +``` + +[controller-gen]: https://book.kubebuilder.io/reference/controller-gen +[CRD]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions +[DTO]: https://martinfowler.com/eaaCatalog/dataTransferObject.html +[Kubernetes API]: https://docs.k8s.io/concepts/overview/kubernetes-api + + +# CRD Versions + +Kubernetes organizes API resources into Groups. Each resource is represented by a Kind that can have multiple Versions. 
The shape of a CRD reflects this: + +```yaml +kind: CustomResourceDefinition +metadata: + name: "ideas.example.com" # {spec.plural}.{spec.group} +spec: + group: "example.com" # one group (G) + names: + kind: Idea # one kind (K) + plural: ideas # one resource (R) + singular: idea # one resource (R) + versions: # many versions (V) + - name: v1beta1 + schema: … + - name: v1 + schema: … +``` + + + +Every Kubernetes API request includes the Group, Resource, Version, and Kind of its payload and expected response. +The version affects how Kubernetes handles the request, but it does *not* affect how Kubernetes stores the result. +Every Kubernetes [object] is stored according to its Group, Resource, Namespace, and Name. + +> [!NOTE] +> - The API request URL contains the Group + Version + Resource (GVR). +> - The API request body includes the Group + Version (GV) as [`apiVersion`] and Kind (K) as `kind`. +> - [RBAC] matches on the Group + Resource (GR) of an API request. +> - The etcd key of each object contains the Group + Resource (GR), Namespace, and Name. + +This allows a variety of clients to concurrently use whichever API versions they understand. +Kubernetes converts what is stored to or from the version in the API request. +This means, however, that *every* version of a resource **must** be equivalent to *every other* version. + +Each CRD indicates which versions Kubernetes should accept from clients with `served=true`. +Kubernetes stores custom resource objects in the *single* version indicated with `storage=true`. + +> [!IMPORTANT] +> We use the `None` conversion strategy and [validation ratcheting](validation.md#validation-ratcheting)... + +[`apiVersion`]: https://docs.k8s.io/reference/using-api#api-groups +[object]: https://docs.k8s.io/concepts/overview/working-with-objects +[RBAC]: https://docs.k8s.io/reference/kubernetes-api/authorization-resources/role-v1 + + diff --git a/pkg/apis/postgres-operator.crunchydata.com/validation.md b/pkg/apis/postgres-operator.crunchydata.com/validation.md index 49a243d4c..92b9fa11b 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/validation.md +++ b/pkg/apis/postgres-operator.crunchydata.com/validation.md @@ -4,10 +4,9 @@ # SPDX-License-Identifier: Apache-2.0 --> -# Custom Resource Definitions +# Custom Resource Definition Schemas -These directories contain Go types that serve as [DTO]s for communicating with the Kubernetes API. -We use the [controller-gen] tool to produce [CRD]s with schemas that match the Go types. +These directories contain Go types that [controller-gen] uses to generate matching [CRD] schemas. The CRD schema tells Kubernetes what fields and values are allowed in our API objects and how to handle changes to values. > [!TIP] @@ -15,7 +14,7 @@ The CRD schema tells Kubernetes what fields and values are allowed in our API ob CRD schemas are modified OpenAPI 3.0 [validation] schemas. Much of the schema defines what fields, types, and values are *allowed*. -`controller-gen` considers the [Go type] of a field and its [validation markers] for this. +`controller-gen` considers the field's [Go type] and [validation markers] for this. Kubernetes uses its own algorithm to consider and accept changes to API objects: [Server-Side Apply], SSA. CRD schemas contain non-standard attributes that affect SSA.
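A hypothetical schema fragment (not taken from our CRDs) showing two of these attributes; with them, SSA merges the array element by element, keyed by `name`, instead of replacing it wholesale:

```yaml
properties:
  volumes:
    type: array
    x-kubernetes-list-type: map
    x-kubernetes-list-map-keys:
      - name
    items:
      type: object
```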
[controller-gen]: https://book.kubebuilder.io/reference/controller-gen [CRD]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions -[DTO]: https://martinfowler.com/eaaCatalog/dataTransferObject.html -[Go type]: https://go.dev/ref/spec#Types -[Kubernetes API]: https://docs.k8s.io/concepts/overview/kubernetes-api [processing markers]: https://book.kubebuilder.io/reference/markers/crd-processing [Server-Side Apply]: https://docs.k8s.io/reference/using-api/server-side-apply [validation]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions#validation @@ -92,7 +88,7 @@ The `additionalProperties` property indicates that the keys are unknown; these f # CEL Rules > [!IMPORTANT] -> When possible, use [OpenAPI properties](#FIXME) rather than CEL rules. +> When possible, use [OpenAPI properties](#openapi-properties) rather than CEL rules. > The former do not affect the CRD [validation budget](#FIXME). ## Optional field syntax @@ -109,3 +105,88 @@ likewise be considered optional. The optional field syntax is only available in K8s 1.29+. [optional field marker]: https://pkg.go.dev/github.com/google/cel-go/cel#hdr-Syntax_Changes-OptionalTypes. + +## CEL Availability + +Kubernetes' capabilities with CEL are continuously expanding. +Different versions of Kubernetes have different CEL functions, syntax, and features. + +```asciidoc +:controller-tools: https://github.com/kubernetes-sigs/controller-tools/releases + +[cols=",,", options="header"] +|=== +| Kubernetes | OpenShift | `controller-gen` + +| 1.25 Beta, `CustomResourceValidationExpressions` gate +| OCP 4.12 +| link:{controller-tools}/v0.9.0[v0.9.0] has `rule` and `message` fields on the `XValidation` marker + +| 1.27 adds `messageExpression` +| OCP 4.14 +| link:{controller-tools}/v0.15.0[v0.15.0] adds `messageExpression` field to the `XValidation` marker + +| 1.28 adds `reason` and `fieldPath` +| OCP 4.15 +| link:{controller-tools}/v0.16.0[v0.16.0] adds `reason` and `fieldPath` to the `XValidation` marker + +| 1.29 GA | OCP 4.16 | + +| 1.30 enables link:#validation-ratcheting[validation ratcheting]; link:https://pr.k8s.io/123475[fixes fieldPath]… +| OCP 4.17 +| link:{controller-tools}/v0.17.3[v0.17.3] adds `optionalOldSelf` to the `XValidation` marker + +| 1.34 link:https://pr.k8s.io/132837[fixes IntOrString cost] +| ? +| link:{controller-tools}/v0.18.0[v0.18.0] allows validation on IntOrString + +| 1.35 link:https://pr.k8s.io/132798[shows values when validation fails] +| ? +| n/a + +|=== +``` + + + +Some details are missing from the Go package documentation: https://pr.k8s.io/130660 + +| CEL [libraries](https://code.k8s.io/staging/src/k8s.io/apiserver/pkg/cel/library), extensions, etc. 
| Kubernetes | OpenShift | +| --- | --- | --- | +| kubernetes.authz | 1.28 | +| kubernetes.authzSelectors | 1.32 | +| kubernetes.format | 1.32 | [4.18](https://github.com/openshift/kubernetes/pull/2140) | +| kubernetes.lists | 1.24 | 4.12 | +| kubernetes.net.cidr | 1.31 | [4.16](https://github.com/openshift/kubernetes/pull/1828) | +| kubernetes.net.ip | 1.31 | [4.16](https://github.com/openshift/kubernetes/pull/1828) | +| kubernetes.quantity | 1.29 | 4.16 | +| kubernetes.regex | 1.24 | 4.12 | +| kubernetes.urls | 1.24 | 4.12 | +| [cross-type numeric comparison](https://pkg.go.dev/github.com/google/cel-go/cel#CrossTypeNumericComparisons) | 1.29 | 4.16 | +| [optional types](https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes) | 1.29 | 4.16 | +| [strings](https://pkg.go.dev/github.com/google/cel-go/ext#Strings) v0 | 1.24 | 4.12 | +| [strings](https://pkg.go.dev/github.com/google/cel-go/ext#Strings) v2 | 1.30 | 4.17 | +| [sets](https://pkg.go.dev/github.com/google/cel-go/ext#Sets) | 1.30 | 4.17 | +| [two-variable comprehension](https://pkg.go.dev/github.com/google/cel-go/ext#TwoVarComprehensions) | 1.33 | + + +# Validation Ratcheting + +> **Feature Gate:** `CRDValidationRatcheting` +> +> Enabled in Kubernetes 1.30 and GA in 1.33 (OpenShift 4.17 and ~4.20) + +[Validation ratcheting] allows update operations to succeed when unchanged fields are invalid. +This allows CRDs to add or "tighten" validation without breaking existing CR objects. + +Some schema changes are not ratcheted: + +- OpenAPI `allOf`, `oneOf`, `anyOf`, `not`; values in fields with these must be valid +- OpenAPI `required`; required fields are always required +- Removing `additionalProperties`; undefined fields are always dropped +- Adding or removing fields (names) in `properties`; undefined fields are dropped, and values in new fields must be valid +- Changes to `x-kubernetes-list-type` or `x-kubernetes-list-map-keys`; values in these fields must be valid +- Rules containing `oldSelf`; these are [transition rules] and should do their own ratcheting + +[transition rules]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions#transition-rules +[Validation ratcheting]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions#validation-ratcheting From f00b229e29eb518ba34af848b346339ea37f8145 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Tue, 12 Aug 2025 13:37:10 -0500 Subject: [PATCH 35/43] Cache Go modules in development builds --- Dockerfile | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6fed212c2..b53f2c071 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,11 +9,15 @@ COPY hack/tools/queries /opt/crunchy/conf WORKDIR /usr/src/app COPY . . 
-ENV GOCACHE=/var/cache/go -# Build the operator and assemble the licenses -RUN --mount=type=cache,target=/var/cache/go go build ./cmd/postgres-operator -RUN go run ./hack/extract-licenses.go licenses postgres-operator +ENV GOCACHE=/var/cache/go +ENV GOMODCACHE=/var/cache/gomod +RUN --mount=type=cache,target=/var/cache \ +<<-SHELL +set -e +go build ./cmd/postgres-operator +go run ./hack/extract-licenses.go licenses postgres-operator +SHELL FROM docker.io/library/debian:bookworm From 80512c5a1403a7db07c4937e54902672f62f8ea3 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Thu, 2 Oct 2025 18:05:54 -0500 Subject: [PATCH 36/43] Prepare file permissions in the build stage Some files and directories were not readable by the image user. Issue: PGO-2695 --- Dockerfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index b53f2c071..f6d60b699 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,9 +4,6 @@ FROM docker.io/library/golang:bookworm AS build -COPY licenses /licenses -COPY hack/tools/queries /opt/crunchy/conf - WORKDIR /usr/src/app COPY . . @@ -17,12 +14,15 @@ RUN --mount=type=cache,target=/var/cache \ set -e go build ./cmd/postgres-operator go run ./hack/extract-licenses.go licenses postgres-operator + +find ./hack/tools/queries '(' -type d -exec chmod 0555 '{}' + ')' -o '(' -type f -exec chmod 0444 '{}' + ')' +find ./licenses '(' -type d -exec chmod 0555 '{}' + ')' -o '(' -type f -exec chmod 0444 '{}' + ')' SHELL FROM docker.io/library/debian:bookworm -COPY --from=build --chmod=0444 /usr/src/app/licenses /licenses -COPY --from=build --chmod=0444 /opt/crunchy/conf /opt/crunchy/conf +COPY --from=build /usr/src/app/licenses /licenses +COPY --from=build /usr/src/app/hack/tools/queries /opt/crunchy/conf COPY --from=build /usr/src/app/postgres-operator /usr/local/bin USER 2 From 35ebaedc98139d0c6bbfd467829c58981cd46009 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Fri, 19 Sep 2025 11:20:33 -0500 Subject: [PATCH 37/43] Move kubebuilder annotations to YAML comments --- ....crunchydata.com_crunchybridgeclusters.yaml | 3 +-- ...gres-operator.crunchydata.com_pgadmins.yaml | 3 +-- ...es-operator.crunchydata.com_pgupgrades.yaml | 3 +-- ...rator.crunchydata.com_postgresclusters.yaml | 3 +-- internal/crd/post-process.go | 7 ++++++- internal/crd/post-process.jq | 18 ++++++++++++++++++ 6 files changed, 28 insertions(+), 9 deletions(-) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml index 6add75dad..f238bf942 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -1,9 +1,8 @@ --- +# controller-gen.kubebuilder.io/version: v0.17.3 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 name: crunchybridgeclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index 85476b8db..7a07b7776 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -1,9 +1,8 @@ --- +# controller-gen.kubebuilder.io/version: v0.17.3 apiVersion: apiextensions.k8s.io/v1 kind: 
CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 name: pgadmins.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml index 240853746..7250326d0 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -1,9 +1,8 @@ --- +# controller-gen.kubebuilder.io/version: v0.17.3 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 name: pgupgrades.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 3fef71364..16fc4924c 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1,9 +1,8 @@ --- +# controller-gen.kubebuilder.io/version: v0.17.3 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 name: postgresclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com diff --git a/internal/crd/post-process.go b/internal/crd/post-process.go index 5aac23089..3117e16ac 100644 --- a/internal/crd/post-process.go +++ b/internal/crd/post-process.go @@ -12,6 +12,7 @@ import ( "log/slog" "os" "path/filepath" + "regexp" "github.com/itchyny/gojq" "sigs.k8s.io/yaml" @@ -44,8 +45,12 @@ func main() { panic(err) } + // Turn top-level strings that start with octothorpe U+0023 into YAML comments by removing their quotes. + yamlData := need(yaml.Marshal(v)) + yamlData = regexp.MustCompile(`(?m)^'(#[^']*)'(.*)$`).ReplaceAll(yamlData, []byte("$1$2")) + slog.Info("Writing", "file", yamlName) - must(os.WriteFile(yamlPath, append([]byte("---\n"), need(yaml.Marshal(v))...), 0o644)) + must(os.WriteFile(yamlPath, append([]byte("---\n"), yamlData...), 0o644)) } if _, ok := result.Next(); ok { diff --git a/internal/crd/post-process.jq b/internal/crd/post-process.jq index 41b2faa29..8325ca47f 100644 --- a/internal/crd/post-process.jq +++ b/internal/crd/post-process.jq @@ -64,4 +64,22 @@ reduce paths(try .["x-kubernetes-int-or-string"] == true) as $path (.; end ) | +# Rename Kubebuilder annotations and move them to the top-level. +# The caller can turn these into YAML comments. +. += (.metadata.annotations | with_entries(select(.key | startswith("controller-gen.kubebuilder")) | .key = "# \(.key)")) | +.metadata.annotations |= with_entries(select(.key | startswith("controller-gen.kubebuilder") | not)) | + +# Remove nulls and empty objects from metadata. +# Some very old generators would set a null creationTimestamp. +# +# https://github.com/kubernetes-sigs/controller-tools/issues/402 +# https://issue.k8s.io/67610 +del(.metadata | .. | select(length == 0)) | + +# Remove status to avoid conflicts with the CRD controller. +# Some very old generators would set this field. +# +# https://github.com/kubernetes-sigs/controller-tools/issues/456 +del(.status) | + . 
From 520a7da6d475c4c57c2934a316a0753e73cbe95d Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Tue, 30 Sep 2025 11:09:04 -0500 Subject: [PATCH 38/43] Bump sigs.k8s.io/controller-tools to v0.18.0 This version adds Title markers and accepts "optionalOldSelf" of Kubernetes 1.30 on XValidation markers. See: https://github.com/kubernetes-sigs/controller-tools/releases/v0.18.0 --- ...es-operator.crunchydata.com_crunchybridgeclusters.yaml | 2 +- .../bases/postgres-operator.crunchydata.com_pgadmins.yaml | 2 +- .../postgres-operator.crunchydata.com_pgupgrades.yaml | 2 +- ...ostgres-operator.crunchydata.com_postgresclusters.yaml | 2 +- go.mod | 4 +++- go.sum | 8 ++++++-- 6 files changed, 13 insertions(+), 7 deletions(-) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml index f238bf942..5313e686e 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -1,5 +1,5 @@ --- -# controller-gen.kubebuilder.io/version: v0.17.3 +# controller-gen.kubebuilder.io/version: v0.18.0 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index 7a07b7776..e5f64b29f 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -1,5 +1,5 @@ --- -# controller-gen.kubebuilder.io/version: v0.17.3 +# controller-gen.kubebuilder.io/version: v0.18.0 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml index 7250326d0..97bc242fd 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -1,5 +1,5 @@ --- -# controller-gen.kubebuilder.io/version: v0.17.3 +# controller-gen.kubebuilder.io/version: v0.18.0 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 16fc4924c..b993610c0 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1,5 +1,5 @@ --- -# controller-gen.kubebuilder.io/version: v0.17.3 +# controller-gen.kubebuilder.io/version: v0.18.0 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/go.mod b/go.mod index 3cdfaca91..74914ddeb 100644 --- a/go.mod +++ b/go.mod @@ -133,9 +133,11 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.33.0 // indirect k8s.io/apiserver v0.33.0 // indirect + k8s.io/code-generator v0.33.0 // indirect + k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect - sigs.k8s.io/controller-tools v0.17.3 // indirect + sigs.k8s.io/controller-tools v0.18.0 // indirect sigs.k8s.io/randfill v1.0.0 // indirect 
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/go.sum b/go.sum index baf3d4f93..e65172ea2 100644 --- a/go.sum +++ b/go.sum @@ -354,8 +354,12 @@ k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= +k8s.io/code-generator v0.33.0 h1:B212FVl6EFqNmlgdOZYWNi77yBv+ed3QgQsMR8YQCw4= +k8s.io/code-generator v0.33.0/go.mod h1:KnJRokGxjvbBQkSJkbVuBbu6z4B0rC7ynkpY5Aw6m9o= k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= @@ -366,8 +370,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUo sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= -sigs.k8s.io/controller-tools v0.17.3 h1:lwFPLicpBKLgIepah+c8ikRBubFW5kOQyT88r3EwfNw= -sigs.k8s.io/controller-tools v0.17.3/go.mod h1:1ii+oXcYZkxcBXzwv3YZBlzjt1fvkrCGjVF73blosJI= +sigs.k8s.io/controller-tools v0.18.0 h1:rGxGZCZTV2wJreeRgqVoWab/mfcumTMmSwKzoM9xrsE= +sigs.k8s.io/controller-tools v0.18.0/go.mod h1:gLKoiGBriyNh+x1rWtUQnakUYEujErjXs9pf+x/8n1U= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= From 871dbc04bcc0364383066104d7a9db345f93e694 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Tue, 30 Sep 2025 11:14:01 -0500 Subject: [PATCH 39/43] Make ImageVolumeSource.Reference required via OpenAPI OpenAPI validation does not count toward the CRD XValidation CEL budget. 
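For illustration (not part of this change): the schema manipulation keyed on
the "+kubebuilder:title" marker can be exercised directly with gojq. The
schema fragment below is a stand-in for the generated ImageVolumeSource
schema (the real one is much larger), and the filter mirrors the shape of
the post-process.jq rule in the diff below:

    package main

    import (
        "fmt"

        "github.com/itchyny/gojq"
    )

    func main() {
        // Stand-in for the generated schema of the vendored type.
        schema := map[string]any{
            "title": "$corev1.ImageVolumeSource",
            "type":  "object",
            "properties": map[string]any{
                "reference":  map[string]any{"type": "string"},
                "pullPolicy": map[string]any{"type": "string"},
            },
        }

        // Require "reference", tighten both properties, drop the marker title.
        query, err := gojq.Parse(`. * {
            required: ["reference"],
            properties: {
                reference: {minLength: 1},
                pullPolicy: {enum: ["Always", "Never", "IfNotPresent"]}
            }
        } | del(.title)`)
        if err != nil {
            panic(err)
        }

        if v, ok := query.Run(schema).Next(); ok {
            fmt.Println(v) // the patched schema, now with OpenAPI constraints
        }
    }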
--- ...res-operator.crunchydata.com_pgadmins.yaml | 17 +- ...ator.crunchydata.com_postgresclusters.yaml | 266 +++++++++++------- internal/crd/post-process.jq | 13 + .../validation/pgadmin_test.go | 0 .../validation/pgbackrest_test.go | 0 .../validation/pgbouncer_test.go | 0 .../postgres_authentication_test.go | 0 .../postgrescluster/postgres_config_test.go | 0 .../postgrescluster/postgres_users_test.go | 0 .../validation/postgrescluster_test.go | 40 ++- .../v1beta1/postgrescluster_types.go | 3 + .../v1beta1/shared_types.go | 9 +- 12 files changed, 233 insertions(+), 115 deletions(-) rename internal/{testing => crd}/validation/pgadmin_test.go (100%) rename internal/{testing => crd}/validation/pgbackrest_test.go (100%) rename internal/{testing => crd}/validation/pgbouncer_test.go (100%) rename internal/{testing => crd}/validation/postgrescluster/postgres_authentication_test.go (100%) rename internal/{testing => crd}/validation/postgrescluster/postgres_config_test.go (100%) rename internal/{testing => crd}/validation/postgrescluster/postgres_users_test.go (100%) rename internal/{testing => crd}/validation/postgrescluster_test.go (82%) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index e5f64b29f..b2c030120 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -2620,7 +2620,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -2629,6 +2631,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -2638,7 +2645,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. 
+ minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -2659,11 +2669,8 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using an ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must set a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index b993610c0..769d866e4 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1567,7 +1567,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -1576,6 +1578,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -1585,7 +1592,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -1606,14 +1616,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using - an ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must - set a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -3030,7 +3035,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -3039,6 +3046,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
+ enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -3048,7 +3060,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -3069,14 +3084,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using - an ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must - set a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -4496,7 +4506,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -4505,6 +4517,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -4514,7 +4531,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -4535,14 +4555,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using - an ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must - set a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -6929,7 +6944,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -6938,6 +6955,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
+ enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -6947,7 +6969,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -6968,14 +6993,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using an - ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must set - a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -8076,7 +8096,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -8085,6 +8107,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -8094,7 +8121,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -8115,14 +8145,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using an - ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must set - a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -11428,7 +11453,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -11437,6 +11464,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
+ enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -11446,7 +11478,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -11467,13 +11502,8 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using an - ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must set - a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -16468,7 +16498,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -16477,6 +16509,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -16486,7 +16523,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -16507,14 +16547,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using an - ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must set - a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -20640,7 +20675,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -20649,6 +20686,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
+ enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -20658,7 +20700,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -20679,14 +20724,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using - an ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must - set a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -22103,7 +22143,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -22112,6 +22154,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -22121,7 +22168,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -22142,14 +22192,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using - an ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must - set a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -23569,7 +23614,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -23578,6 +23625,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
+ enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -23587,7 +23639,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -23608,14 +23663,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using - an ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must - set a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -25980,7 +26030,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -25989,6 +26041,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -25998,7 +26055,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -26019,14 +26079,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using an - ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must set - a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -27127,7 +27182,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -27136,6 +27193,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
+ enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -27145,7 +27207,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -27166,14 +27231,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using an - ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must set - a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -30479,7 +30539,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -30488,6 +30550,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -30497,7 +30564,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -30518,13 +30588,8 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using an - ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must set - a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -35518,7 +35583,9 @@ spec: type: array x-kubernetes-list-type: set image: - description: Details for adding an image volume + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image properties: pullPolicy: description: |- @@ -35527,6 +35594,11 @@ spec: Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
+ enum: + - Always + - Never + - IfNotPresent + maxLength: 12 type: string reference: description: |- @@ -35536,7 +35608,10 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + minLength: 1 type: string + required: + - reference type: object name: description: |- @@ -35557,14 +35632,9 @@ spec: x-kubernetes-validations: - message: you must set only one of image or claimName rule: has(self.claimName) != has(self.image) - - message: readOnly cannot be set false when using an - ImageVolumeSource + - message: image volumes must be readOnly rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' - - message: if using an ImageVolumeSource, you must set - a reference - rule: '!has(self.image) || (self.?image.reference.hasValue() - && self.image.reference.size() > 0)' maxItems: 10 type: array x-kubernetes-list-map-keys: diff --git a/internal/crd/post-process.jq b/internal/crd/post-process.jq index 8325ca47f..fccf0a9d7 100644 --- a/internal/crd/post-process.jq +++ b/internal/crd/post-process.jq @@ -3,6 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 # # This [jq] filter modifies a Kubernetes CustomResourceDefinition. +# Use the controller-gen "+kubebuilder:title" marker to identify schemas that need special manipulation. # # [jq]: https://jqlang.org @@ -10,6 +11,18 @@ # https://jqlang.org/manual#multiplication-division-modulo def merge(stream): reduce stream as $i ({}; . * $i); +# https://pkg.go.dev/k8s.io/api/core/v1#ImageVolumeSource +reduce paths(try .title == "$corev1.ImageVolumeSource") as $path (.; + getpath($path) as $schema | + setpath($path; $schema * { + required: (["reference"] + ($schema.required // []) | sort), + properties: { + pullPolicy: { enum: ["Always", "Never", "IfNotPresent"] }, + reference: { minLength: 1 } + } + } | del(.title)) +) | + # Kubernetes assumes the evaluation cost of an enum value is very large: https://issue.k8s.io/119511 # Look at every schema that has a populated "enum" property. 
reduce paths(try .enum | length > 0) as $path (.; diff --git a/internal/testing/validation/pgadmin_test.go b/internal/crd/validation/pgadmin_test.go similarity index 100% rename from internal/testing/validation/pgadmin_test.go rename to internal/crd/validation/pgadmin_test.go diff --git a/internal/testing/validation/pgbackrest_test.go b/internal/crd/validation/pgbackrest_test.go similarity index 100% rename from internal/testing/validation/pgbackrest_test.go rename to internal/crd/validation/pgbackrest_test.go diff --git a/internal/testing/validation/pgbouncer_test.go b/internal/crd/validation/pgbouncer_test.go similarity index 100% rename from internal/testing/validation/pgbouncer_test.go rename to internal/crd/validation/pgbouncer_test.go diff --git a/internal/testing/validation/postgrescluster/postgres_authentication_test.go b/internal/crd/validation/postgrescluster/postgres_authentication_test.go similarity index 100% rename from internal/testing/validation/postgrescluster/postgres_authentication_test.go rename to internal/crd/validation/postgrescluster/postgres_authentication_test.go diff --git a/internal/testing/validation/postgrescluster/postgres_config_test.go b/internal/crd/validation/postgrescluster/postgres_config_test.go similarity index 100% rename from internal/testing/validation/postgrescluster/postgres_config_test.go rename to internal/crd/validation/postgrescluster/postgres_config_test.go diff --git a/internal/testing/validation/postgrescluster/postgres_users_test.go b/internal/crd/validation/postgrescluster/postgres_users_test.go similarity index 100% rename from internal/testing/validation/postgrescluster/postgres_users_test.go rename to internal/crd/validation/postgrescluster/postgres_users_test.go diff --git a/internal/testing/validation/postgrescluster_test.go b/internal/crd/validation/postgrescluster_test.go similarity index 82% rename from internal/testing/validation/postgrescluster_test.go rename to internal/crd/validation/postgrescluster_test.go index 7060a7933..e491c47d2 100644 --- a/internal/testing/validation/postgrescluster_test.go +++ b/internal/crd/validation/postgrescluster_test.go @@ -14,6 +14,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" v1 "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -110,6 +111,7 @@ func TestPostgresUserInterfaceAcrossVersions(t *testing.T) { func TestAdditionalVolumes(t *testing.T) { ctx := context.Background() cc := require.KubernetesAtLeast(t, "1.30") + dryrun := client.NewDryRunClient(cc) t.Parallel() namespace := require.Namespace(t, cc) @@ -154,8 +156,13 @@ func TestAdditionalVolumes(t *testing.T) { }] } }]`, "spec", "instances") - err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + + err := dryrun.Create(ctx, tmp.DeepCopy()) assert.Assert(t, apierrors.IsInvalid(err)) + + details := require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 1)) + assert.Equal(t, details.Causes[0].Field, "spec.instances[0].volumes.additional[0]") assert.ErrorContains(t, err, "you must set only one of image or claimName") }) @@ -178,9 +185,14 @@ func TestAdditionalVolumes(t *testing.T) { }] } }]`, "spec", "instances") - err := cc.Create(ctx, tmp.DeepCopy(), 
client.DryRunAll) + + err := dryrun.Create(ctx, tmp.DeepCopy()) assert.Assert(t, apierrors.IsInvalid(err)) - assert.ErrorContains(t, err, "readOnly cannot be set false when using an ImageVolumeSource") + + details := require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 1)) + assert.Equal(t, details.Causes[0].Field, "spec.instances[0].volumes.additional[0]") + assert.ErrorContains(t, err, "image volumes must be readOnly") }) t.Run("Reference must be set when using image volume", func(t *testing.T) { @@ -201,9 +213,15 @@ func TestAdditionalVolumes(t *testing.T) { }] } }]`, "spec", "instances") - err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + + err := dryrun.Create(ctx, tmp.DeepCopy()) assert.Assert(t, apierrors.IsInvalid(err)) - assert.ErrorContains(t, err, "if using an ImageVolumeSource, you must set a reference") + + details := require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 2)) + assert.Assert(t, cmp.Equal(details.Causes[0].Field, "spec.instances[0].volumes.additional[0].image.reference")) + assert.Assert(t, cmp.Equal(details.Causes[0].Type, "FieldValueRequired")) + assert.ErrorContains(t, err, "Required") }) t.Run("Reference cannot be an empty string when using image volume", func(t *testing.T) { @@ -225,9 +243,15 @@ func TestAdditionalVolumes(t *testing.T) { }] } }]`, "spec", "instances") - err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + + err := dryrun.Create(ctx, tmp.DeepCopy()) assert.Assert(t, apierrors.IsInvalid(err)) - assert.ErrorContains(t, err, "if using an ImageVolumeSource, you must set a reference") + + details := require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 1)) + assert.Assert(t, cmp.Equal(details.Causes[0].Field, "spec.instances[0].volumes.additional[0].image.reference")) + assert.Assert(t, cmp.Equal(details.Causes[0].Type, "FieldValueInvalid")) + assert.ErrorContains(t, err, "at least 1 chars long") }) t.Run("ReadOnly can be omitted or set true when using image volume", func(t *testing.T) { @@ -265,6 +289,6 @@ func TestAdditionalVolumes(t *testing.T) { }] } }]`, "spec", "instances") - assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll)) + assert.NilError(t, dryrun.Create(ctx, tmp.DeepCopy())) }) } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 88c16b9af..26d633777 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -73,6 +73,9 @@ type PostgresClusterSpec struct { // e.g. RELATED_IMAGE_POSTGRES_13. For PostGIS enabled PostgreSQL images, // the format is RELATED_IMAGE_POSTGRES_{postgresVersion}_GIS_{postGISVersion}, // e.g. RELATED_IMAGE_POSTGRES_13_GIS_3.1. 
+ // --- + // [corev1.Container.Image] + // // +optional // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 Image string `json:"image,omitempty"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index 79c343524..b276213f6 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -313,8 +313,7 @@ func (meta *Metadata) GetAnnotationsOrNil() map[string]string { // +structType=atomic // // +kubebuilder:validation:XValidation:rule=`has(self.claimName) != has(self.image)`,message=`you must set only one of image or claimName` -// +kubebuilder:validation:XValidation:rule=`!has(self.image) || !has(self.readOnly) || self.readOnly`,message=`readOnly cannot be set false when using an ImageVolumeSource` -// +kubebuilder:validation:XValidation:rule=`!has(self.image) || (self.?image.reference.hasValue() && self.image.reference.size() > 0)`,message=`if using an ImageVolumeSource, you must set a reference` +// +kubebuilder:validation:XValidation:rule=`!has(self.image) || !has(self.readOnly) || self.readOnly`,message=`image volumes must be readOnly` type AdditionalVolume struct { // Name of an existing PersistentVolumeClaim. // --- @@ -337,9 +336,11 @@ type AdditionalVolume struct { // +optional Containers []DNS1123Label `json:"containers"` - // Details for adding an image volume + // Reference to an image or OCI artifact. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#image // --- - // https://docs.k8s.io/concepts/storage/volumes#image + // Use "title" to add more validation in [internal/crd/post-process.jq]. + // +kubebuilder:title=$corev1.ImageVolumeSource // // +optional Image *corev1.ImageVolumeSource `json:"image,omitempty"` From cd9da8584c0341a3bfaab21f95cf658f7b04c3ba Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Thu, 2 Oct 2025 16:18:54 -0500 Subject: [PATCH 40/43] Use regexp to scan pgBackRest restore options once Input options were being scanned multiple times in different ways. 
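For illustration (not part of this change): one pass with the regexp records
every flag name, and because the match stops at the first space or equals
sign, "--target-timeline" is stored under its own key and can no longer
satisfy a check for "--target". A minimal sketch with hypothetical option
values:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Hypothetical restore options as a user might write them in the spec.
        options := []string{"--set=20251002-000000F", "--target-timeline=2 --delta"}

        hasFlag := make(map[string]bool)
        matchFlag := regexp.MustCompile(`--[^ =]+`)
        for _, input := range options {
            for _, match := range matchFlag.FindAllString(input, -1) {
                hasFlag[match] = true
            }
        }

        // Every later question is a map lookup against that single scan.
        fmt.Println(hasFlag["--target"], hasFlag["--delta"]) // false true
    }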
--- .../controller/postgrescluster/pgbackrest.go | 75 ++++++++----------- .../postgrescluster/pgbackrest_test.go | 4 +- 2 files changed, 34 insertions(+), 45 deletions(-) diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index e4e1a06d3..f15dfcce0 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -11,6 +11,7 @@ import ( "path/filepath" "reflect" "regexp" + "slices" "sort" "strings" "time" @@ -1190,35 +1191,36 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, pgdataVolume, pgwalVolume *corev1.PersistentVolumeClaim, pgtablespaceVolumes []*corev1.PersistentVolumeClaim, dataSource *v1beta1.PostgresClusterDataSource, - instanceName, instanceSetName, configHash, stanzaName string) error { - + instanceName, instanceSetName, configHash, stanzaName string, +) error { + hasFlag := make(map[string]bool) + matchFlag := regexp.MustCompile(`--[^ =]+`) repoName := dataSource.RepoName - options := dataSource.Options + + for _, input := range dataSource.Options { + for _, match := range matchFlag.FindAllString(input, -1) { + hasFlag[match] = true + } + } // ensure options are properly set // TODO (andrewlecuyer): move validation logic to a webhook - for _, opt := range options { + { var msg string switch { - // Since '--repo' can be set with or without an equals ('=') sign, we check for both - // usage patterns. - case strings.Contains(opt, "--repo=") || strings.Contains(opt, "--repo "): + case hasFlag["--repo"]: msg = "Option '--repo' is not allowed: please use the 'repoName' field instead." - case strings.Contains(opt, "--stanza"): - msg = "Option '--stanza' is not allowed: the operator will automatically set this " + - "option" - case strings.Contains(opt, "--pg1-path"): - msg = "Option '--pg1-path' is not allowed: the operator will automatically set this " + - "option" - case strings.Contains(opt, "--target-action"): - msg = "Option '--target-action' is not allowed: the operator will automatically set this " + - "option " - case strings.Contains(opt, "--link-map"): - msg = "Option '--link-map' is not allowed: the operator will automatically set this " + - "option " + case hasFlag["--stanza"]: + msg = "Option '--stanza' is not allowed: the operator will automatically set this option" + case hasFlag["--pg1-path"]: + msg = "Option '--pg1-path' is not allowed: the operator will automatically set this option" + case hasFlag["--target-action"]: + msg = "Option '--target-action' is not allowed: the operator will automatically set this option" + case hasFlag["--link-map"]: + msg = "Option '--link-map' is not allowed: the operator will automatically set this option" } if msg != "" { - r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "InvalidDataSource", msg, repoName) + r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidDataSource", msg) return nil } } @@ -1226,27 +1228,12 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, pgdata := postgres.DataDirectory(cluster) // combine options provided by user in the spec with those populated by the operator for a // successful restore - opts := append(options, []string{ - "--stanza=" + stanzaName, - "--pg1-path=" + pgdata, - "--repo=" + regexRepoIndex.FindString(repoName)}...) - - // Look specifically for the "--target" flag, NOT flags that contain - // "--target" (e.g. 
"--target-timeline") - targetRegex, err := regexp.Compile("--target[ =]") - if err != nil { - return err - } - var deltaOptFound, foundTarget bool - for _, opt := range opts { - switch { - case targetRegex.MatchString(opt): - foundTarget = true - case strings.Contains(opt, "--delta"): - deltaOptFound = true - } - } - if !deltaOptFound { + opts := append(slices.Clone(dataSource.Options), shell.QuoteWords( + "--stanza="+stanzaName, + "--pg1-path="+pgdata, + "--repo="+regexRepoIndex.FindString(repoName), + )...) + if !hasFlag["--delta"] { opts = append(opts, "--delta") } @@ -1262,14 +1249,14 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, // - https://github.com/pgbackrest/pgbackrest/blob/bb03b3f41942d0b781931092a76877ad309001ef/src/command/restore/restore.c#L1623 // - https://github.com/pgbackrest/pgbackrest/issues/1314 // - https://github.com/pgbackrest/pgbackrest/issues/987 - if foundTarget { + if hasFlag["--target"] { opts = append(opts, "--target-action=promote") } for i, instanceSpec := range cluster.Spec.InstanceSets { if instanceSpec.Name == instanceSetName { - opts = append(opts, "--link-map=pg_wal="+postgres.WALDirectory(cluster, - &cluster.Spec.InstanceSets[i])) + opts = append(opts, "--link-map=pg_wal="+ + postgres.WALDirectory(cluster, &cluster.Spec.InstanceSets[i])) } } diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index b0f4d0eb8..a976ff9ff 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" @@ -2355,7 +2356,8 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { LabelSelector: naming.PGBackRestRestoreJobSelector(clusterName), Namespace: cluster.Namespace, })) - assert.Assert(t, tc.result.jobCount == len(restoreJobs.Items)) + assert.Equal(t, tc.result.jobCount, len(restoreJobs.Items), + "got:\n%s", require.Value(yaml.Marshal(restoreJobs.Items))) if len(restoreJobs.Items) == 1 { assert.Assert(t, restoreJobs.Items[0].Labels[naming.LabelStartupInstance] != "") assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestConfigHash] != "") From db542d20f586180d5bedaca063406dfc379be3d8 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Thu, 2 Oct 2025 16:50:37 -0500 Subject: [PATCH 41/43] Pass Postgres settings to RestoreCommand using ParameterSet --- .../controller/postgrescluster/pgbackrest.go | 12 +++---- .../controller/postgrescluster/postgres.go | 2 +- internal/pgbackrest/config.go | 9 ++---- internal/pgbackrest/config_test.go | 16 +++++----- internal/postgres/huge_pages.go | 6 ++-- internal/postgres/huge_pages_test.go | 32 +++++++++---------- 6 files changed, 35 insertions(+), 42 deletions(-) diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index f15dfcce0..14805e461 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1260,17 +1260,15 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, } } - // Check to see if huge pages have been requested in the spec. 
If they have, include 'huge_pages = try' - // in the restore command. If they haven't, include 'huge_pages = off'. - hugePagesSetting := "off" - if postgres.HugePagesRequested(cluster) { - hugePagesSetting = "try" + params := postgres.NewParameterSet() + postgres.SetHugePages(cluster, params) + if fetchKeyCommand := config.FetchKeyCommand(&cluster.Spec); fetchKeyCommand != "" { + params.Add("encryption_key_command", fetchKeyCommand) } // NOTE (andrewlecuyer): Forcing users to put each argument separately might prevent the need // to do any escaping or use eval. - cmd := pgbackrest.RestoreCommand(pgdata, hugePagesSetting, config.FetchKeyCommand(&cluster.Spec), - pgtablespaceVolumes, strings.Join(opts, " ")) + cmd := pgbackrest.RestoreCommand(pgdata, params, strings.Join(opts, " ")) // create the volume resources required for the postgres data directory dataVolumeMount := postgres.DataVolumeMount() diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index beaec3cdf..33907043f 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -134,7 +134,7 @@ func (r *Reconciler) generatePostgresParameters( pgaudit.PostgreSQLParameters(&builtin) pgbackrest.PostgreSQLParameters(cluster, &builtin, backupsSpecFound) pgmonitor.PostgreSQLParameters(ctx, cluster, &builtin) - postgres.SetHugePages(cluster, &builtin) + postgres.SetHugePages(cluster, builtin.Default) // Last write wins, so start with the recommended defaults. result := cmp.Or(builtin.Default.DeepCopy(), postgres.NewParameterSet()) diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index 808354007..1ee5d9c03 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -212,10 +212,9 @@ func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, // - Renames the data directory as needed to bootstrap the cluster using the restored database. // This ensures compatibility with the "existing" bootstrap method that is included in the // Patroni config when bootstrapping a cluster using an existing data directory. -func RestoreCommand(pgdata, hugePagesSetting, fetchKeyCommand string, _ []*corev1.PersistentVolumeClaim, args ...string) []string { - ps := postgres.NewParameterSet() +func RestoreCommand(pgdata string, params *postgres.ParameterSet, args ...string) []string { + ps := params.DeepCopy() ps.Add("data_directory", pgdata) - ps.Add("huge_pages", hugePagesSetting) // Keep history and WAL files until the cluster starts with its normal // archiving enabled. @@ -226,10 +225,6 @@ func RestoreCommand(pgdata, hugePagesSetting, fetchKeyCommand string, _ []*corev // progress during recovery. ps.Add("hot_standby", "on") - if fetchKeyCommand != "" { - ps.Add("encryption_key_command", fetchKeyCommand) - } - configure := strings.Join([]string{ // With "hot_standby" on, some parameters cannot be smaller than they were // when Postgres was backed up. 
Configure these to match values reported by diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index 91ce833c0..ffb6ba365 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -18,6 +18,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -789,11 +790,7 @@ func TestReloadCommandPrettyYAML(t *testing.T) { func TestRestoreCommand(t *testing.T) { shellcheck := require.ShellCheck(t) - pgdata := "/pgdata/pg13" - opts := []string{ - "--stanza=" + DefaultStanzaName, "--pg1-path=" + pgdata, - "--repo=1"} - command := RestoreCommand(pgdata, "try", "", nil, strings.Join(opts, " ")) + command := RestoreCommand("/pgdata/pg13", postgres.NewParameterSet(), "--repo=1") assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) assert.Assert(t, len(command) > 3) @@ -810,17 +807,20 @@ func TestRestoreCommand(t *testing.T) { func TestRestoreCommandPrettyYAML(t *testing.T) { assert.Assert(t, cmp.MarshalContains( - RestoreCommand("/dir", "try", "", nil, "--options"), + RestoreCommand("/dir", postgres.NewParameterSet(), "--options"), "\n- |", ), "expected literal block scalar") } func TestRestoreCommandTDE(t *testing.T) { + params := postgres.NewParameterSet() + params.Add("encryption_key_command", "whatever") + assert.Assert(t, cmp.MarshalContains( - RestoreCommand("/dir", "try", "echo testValue", nil, "--options"), - "encryption_key_command = 'echo testValue'", + RestoreCommand("/dir", params, "--options"), + "encryption_key_command = 'whatever'", ), "expected encryption_key_command setting") } diff --git a/internal/postgres/huge_pages.go b/internal/postgres/huge_pages.go index b38120baf..9dd408ba3 100644 --- a/internal/postgres/huge_pages.go +++ b/internal/postgres/huge_pages.go @@ -16,11 +16,11 @@ import ( // This function looks for a valid huge_pages resource request. If it finds one, // it sets the PostgreSQL parameter "huge_pages" to "try". If it doesn't find // one, it sets "huge_pages" to "off". 
-func SetHugePages(cluster *v1beta1.PostgresCluster, pgParameters *Parameters) { +func SetHugePages(cluster *v1beta1.PostgresCluster, params *ParameterSet) { if HugePagesRequested(cluster) { - pgParameters.Default.Add("huge_pages", "try") + params.Add("huge_pages", "try") } else { - pgParameters.Default.Add("huge_pages", "off") + params.Add("huge_pages", "off") } } diff --git a/internal/postgres/huge_pages_test.go b/internal/postgres/huge_pages_test.go index 9b9f12172..69528d568 100644 --- a/internal/postgres/huge_pages_test.go +++ b/internal/postgres/huge_pages_test.go @@ -27,11 +27,11 @@ func TestSetHugePages(t *testing.T) { }, }} - pgParameters := NewParameters() - SetHugePages(cluster, &pgParameters) + params := NewParameterSet() + SetHugePages(cluster, params) - assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) - assert.Equal(t, pgParameters.Default.Value("huge_pages"), "off") + assert.Equal(t, params.Has("huge_pages"), true) + assert.Equal(t, params.Value("huge_pages"), "off") }) t.Run("hugepages quantity not set", func(t *testing.T) { @@ -48,11 +48,11 @@ func TestSetHugePages(t *testing.T) { }, }} - pgParameters := NewParameters() - SetHugePages(cluster, &pgParameters) + params := NewParameterSet() + SetHugePages(cluster, params) - assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) - assert.Equal(t, pgParameters.Default.Value("huge_pages"), "off") + assert.Equal(t, params.Has("huge_pages"), true) + assert.Equal(t, params.Value("huge_pages"), "off") }) t.Run("hugepages set to zero", func(t *testing.T) { @@ -68,11 +68,11 @@ func TestSetHugePages(t *testing.T) { }, }} - pgParameters := NewParameters() - SetHugePages(cluster, &pgParameters) + params := NewParameterSet() + SetHugePages(cluster, params) - assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) - assert.Equal(t, pgParameters.Default.Value("huge_pages"), "off") + assert.Equal(t, params.Has("huge_pages"), true) + assert.Equal(t, params.Value("huge_pages"), "off") }) t.Run("hugepages set correctly", func(t *testing.T) { @@ -88,11 +88,11 @@ func TestSetHugePages(t *testing.T) { }, }} - pgParameters := NewParameters() - SetHugePages(cluster, &pgParameters) + params := NewParameterSet() + SetHugePages(cluster, params) - assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) - assert.Equal(t, pgParameters.Default.Value("huge_pages"), "try") + assert.Equal(t, params.Has("huge_pages"), true) + assert.Equal(t, params.Value("huge_pages"), "try") }) } From 0276eff88a5e751830880a81a7934ea9bb866cd5 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Thu, 2 Oct 2025 16:58:47 -0500 Subject: [PATCH 42/43] Look in more directories for restore binaries This makes restores compatible with images from other distros. Issue: PGO-864 --- internal/controller/postgrescluster/pgbackrest.go | 2 +- internal/pgbackrest/config.go | 3 ++- internal/pgbackrest/config_test.go | 9 ++++++--- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 14805e461..52065093a 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1268,7 +1268,7 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, // NOTE (andrewlecuyer): Forcing users to put each argument separately might prevent the need // to do any escaping or use eval. 
- cmd := pgbackrest.RestoreCommand(pgdata, params, strings.Join(opts, " ")) + cmd := pgbackrest.RestoreCommand(cluster.Spec.PostgresVersion, pgdata, params, strings.Join(opts, " ")) // create the volume resources required for the postgres data directory dataVolumeMount := postgres.DataVolumeMount() diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index 1ee5d9c03..f411491fc 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -212,7 +212,7 @@ func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, // - Renames the data directory as needed to bootstrap the cluster using the restored database. // This ensures compatibility with the "existing" bootstrap method that is included in the // Patroni config when bootstrapping a cluster using an existing data directory. -func RestoreCommand(pgdata string, params *postgres.ParameterSet, args ...string) []string { +func RestoreCommand(postgresVersion int32, pgdata string, params *postgres.ParameterSet, args ...string) []string { ps := params.DeepCopy() ps.Add("data_directory", pgdata) @@ -266,6 +266,7 @@ func RestoreCommand(pgdata string, params *postgres.ParameterSet, args ...string script := strings.Join([]string{ `declare -r PGDATA="$1" opts="$2"; export PGDATA PGHOST`, + postgres.ShellPath(postgresVersion), // Remove any "postmaster.pid" file leftover from a prior failure. `rm -f "${PGDATA}/postmaster.pid"`, diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index ffb6ba365..e6ca0b2a7 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -790,11 +790,14 @@ func TestReloadCommandPrettyYAML(t *testing.T) { func TestRestoreCommand(t *testing.T) { shellcheck := require.ShellCheck(t) - command := RestoreCommand("/pgdata/pg13", postgres.NewParameterSet(), "--repo=1") + command := RestoreCommand(19, "/pgdata/pg13", postgres.NewParameterSet(), "--repo=1") assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) assert.Assert(t, len(command) > 3) + assert.Assert(t, cmp.Contains(command[3], "/usr/pgsql-19/bin"), + "expected path to PostgreSQL binaries") + dir := t.TempDir() file := filepath.Join(dir, "script.bash") assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) @@ -807,7 +810,7 @@ func TestRestoreCommand(t *testing.T) { func TestRestoreCommandPrettyYAML(t *testing.T) { assert.Assert(t, cmp.MarshalContains( - RestoreCommand("/dir", postgres.NewParameterSet(), "--options"), + RestoreCommand(9, "/dir", postgres.NewParameterSet(), "--options"), "\n- |", ), "expected literal block scalar") @@ -819,7 +822,7 @@ func TestRestoreCommandTDE(t *testing.T) { assert.Assert(t, cmp.MarshalContains( - RestoreCommand("/dir", params, "--options"), + RestoreCommand(20, "/dir", params, "--options"), "encryption_key_command = 'whatever'", ), "expected encryption_key_command setting") From a736c1427b03bf4016428a3edbdb648d11273ac9 Mon Sep 17 00:00:00 2001 From: Drew Sessler Date: Tue, 7 Oct 2025 11:12:42 -0700 Subject: [PATCH 43/43] Update postgrescluster and pgupgrade APIs to allow pg18 --- .../bases/postgres-operator.crunchydata.com_pgupgrades.yaml | 4 ++-- .../postgres-operator.crunchydata.com_postgresclusters.yaml | 4 ++-- .../v1/postgrescluster_types.go | 2 +- .../v1beta1/pgupgrade_types.go | 4 ++-- .../v1beta1/postgrescluster_types.go | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml 
b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml index 97bc242fd..2ae159429 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -955,7 +955,7 @@ spec: fromPostgresVersion: description: The major version of PostgreSQL before the upgrade. format: int32 - maximum: 17 + maximum: 18 minimum: 11 type: integer image: @@ -1079,7 +1079,7 @@ spec: toPostgresVersion: description: The major version of PostgreSQL to be upgraded to. format: int32 - maximum: 17 + maximum: 18 minimum: 11 type: integer tolerations: diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 769d866e4..921d1fc48 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -13250,7 +13250,7 @@ spec: description: The major version of PostgreSQL installed in the PostgreSQL image format: int32 - maximum: 17 + maximum: 18 minimum: 11 type: integer proxy: @@ -32335,7 +32335,7 @@ spec: description: The major version of PostgreSQL installed in the PostgreSQL image format: int32 - maximum: 17 + maximum: 18 minimum: 11 type: integer proxy: diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go index 8ae03bf25..a8aaa5936 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go @@ -142,7 +142,7 @@ type PostgresClusterSpec struct { // The major version of PostgreSQL installed in the PostgreSQL image // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=11 - // +kubebuilder:validation:Maximum=17 + // +kubebuilder:validation:Maximum=18 // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 PostgresVersion int32 `json:"postgresVersion"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go index a7f40dc83..56138a61f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go @@ -79,7 +79,7 @@ type PGUpgradeSettings struct { // The major version of PostgreSQL before the upgrade. // --- // +kubebuilder:validation:Minimum=11 - // +kubebuilder:validation:Maximum=17 + // +kubebuilder:validation:Maximum=18 // +required FromPostgresVersion int32 `json:"fromPostgresVersion"` @@ -93,7 +93,7 @@ type PGUpgradeSettings struct { // The major version of PostgreSQL to be upgraded to. 
// --- // +kubebuilder:validation:Minimum=11 - // +kubebuilder:validation:Maximum=17 + // +kubebuilder:validation:Maximum=18 // +required ToPostgresVersion int32 `json:"toPostgresVersion"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 26d633777..ed539341d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -130,7 +130,7 @@ type PostgresClusterSpec struct { // The major version of PostgreSQL installed in the PostgreSQL image // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=11 - // +kubebuilder:validation:Maximum=17 + // +kubebuilder:validation:Maximum=18 // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 PostgresVersion int32 `json:"postgresVersion"`
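
A note on PATCH 42 above: it calls postgres.ShellPath(postgresVersion) inside
RestoreCommand but does not include that helper's body. Below is a minimal
sketch of what such a helper might return, assuming it prepends
version-specific binary directories to the restore script's PATH. Only
"/usr/pgsql-<version>/bin" is confirmed by the diff (TestRestoreCommand expects
it for version 19); the Debian-style directory and the exact shell line are
assumptions.

package postgres

import (
	"fmt"
	"strings"
)

// ShellPath returns one line of shell that prepends version-specific
// PostgreSQL binary directories to PATH. The first directory matches the
// expectation in TestRestoreCommand; the second is an assumed fallback
// for Debian-based images.
func ShellPath(postgresVersion int32) string {
	directories := []string{
		// RPM-style layout, confirmed by the test for version 19.
		fmt.Sprintf("/usr/pgsql-%d/bin", postgresVersion),
		// Debian-style layout (assumption, not shown in the diff).
		fmt.Sprintf("/usr/lib/postgresql/%d/bin", postgresVersion),
	}
	return `PATH="` + strings.Join(directories, ":") + `:${PATH}"; export PATH`
}

With postgresVersion 19 this evaluates to
PATH="/usr/pgsql-19/bin:/usr/lib/postgresql/19/bin:${PATH}"; export PATH,
which contains the "/usr/pgsql-19/bin" substring the new assertion checks and
remains valid bash for the shellcheck pass in the same test.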