diff --git a/.dockerignore b/.dockerignore index 6ff2842b87..32fab58f69 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,4 +3,5 @@ /.git /bin /hack +!/hack/extract-licenses.go !/hack/tools/queries diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 2ae50c8a60..780582498e 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -64,9 +64,9 @@ jobs: with: k3s-channel: "${{ matrix.kubernetes }}" prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.54.2-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534 - run: make createnamespaces check-envtest-existing env: @@ -98,15 +98,16 @@ jobs: with: k3s-channel: "${{ matrix.kubernetes }}" prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.54.2-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.3-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.4-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.5-2520 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.5-3.4-2520 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.3-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.6-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.4-2534 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.5-2534 - name: Get pgMonitor files. 
run: make get-pgmonitor @@ -126,17 +127,18 @@ jobs: --env 'CHECK_FOR_UPGRADES=false' \ --env 'QUERIES_CONFIG_DIR=/mnt/hack/tools/queries' \ --env 'KUBECONFIG=hack/.kube/postgres-operator/pgo' \ - --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.54.2-2520' \ - --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2520' \ - --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2520' \ - --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.5-2520' \ - --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520' \ - --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.3-2520' \ - --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.4-2520' \ - --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520' \ - --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.5-3.4-2520' \ - --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2520' \ - --env 'RELATED_IMAGE_COLLECTOR=registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.2-0' \ + --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534' \ + --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2534' \ + --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2534' \ + --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.6-2534' \ + --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.3-2534' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534' \ + --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534' \ + --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.4-2534' \ + --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.5=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.5-2534' \ + --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2534' \ + --env 'RELATED_IMAGE_COLLECTOR=registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.3-0' \ --env 'PGO_FEATURE_GATES=TablespaceVolumes=true,OpenTelemetryLogs=true,OpenTelemetryMetrics=true' \ --name 'postgres-operator' localhost/postgres-operator @@ -152,7 +154,7 @@ jobs: KUTTL_PG_UPGRADE_TO_VERSION: '17' KUTTL_PG_VERSION: '16' KUTTL_POSTGIS_VERSION: '3.4' - KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520' + KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534' - run: | make check-kuttl && exit 
failed=$? diff --git a/Dockerfile b/Dockerfile index a218dfe492..f6d60b699a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,18 +4,25 @@ FROM docker.io/library/golang:bookworm AS build -COPY licenses /licenses -COPY hack/tools/queries /opt/crunchy/conf - WORKDIR /usr/src/app COPY . . + ENV GOCACHE=/var/cache/go -RUN --mount=type=cache,target=/var/cache/go go build ./cmd/postgres-operator +ENV GOMODCACHE=/var/cache/gomod +RUN --mount=type=cache,target=/var/cache \ +<<-SHELL +set -e +go build ./cmd/postgres-operator +go run ./hack/extract-licenses.go licenses postgres-operator + +find ./hack/tools/queries '(' -type d -exec chmod 0555 '{}' + ')' -o '(' -type f -exec chmod 0444 '{}' + ')' +find ./licenses '(' -type d -exec chmod 0555 '{}' + ')' -o '(' -type f -exec chmod 0444 '{}' + ')' +SHELL FROM docker.io/library/debian:bookworm -COPY --from=build /licenses /licenses -COPY --from=build /opt/crunchy/conf /opt/crunchy/conf +COPY --from=build /usr/src/app/licenses /licenses +COPY --from=build /usr/src/app/hack/tools/queries /opt/crunchy/conf COPY --from=build /usr/src/app/postgres-operator /usr/local/bin USER 2 diff --git a/Makefile b/Makefile index 7305d666cf..e4e2cd4f60 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,7 @@ CONTROLLER ?= $(GO) tool sigs.k8s.io/controller-tools/cmd/controller-gen # Run tests using the latest tools. CHAINSAW ?= $(GO) run github.com/kyverno/chainsaw@latest CHAINSAW_TEST ?= $(CHAINSAW) test +CRD_CHECKER ?= $(GO) run github.com/openshift/crd-schema-checker/cmd/crd-schema-checker@latest ENVTEST ?= $(GO) run sigs.k8s.io/controller-runtime/tools/setup-envtest@latest KUTTL ?= $(GO) run github.com/kudobuilder/kuttl/cmd/kubectl-kuttl@latest KUTTL_TEST ?= $(KUTTL) test @@ -132,6 +133,7 @@ deploy-dev: createnamespaces .PHONY: build build: ## Build a postgres-operator image +build: get-pgmonitor $(BUILDAH) build --tag localhost/postgres-operator \ --label org.opencontainers.image.authors='Crunchy Data' \ --label org.opencontainers.image.description='Crunchy PostgreSQL Operator' \ @@ -147,6 +149,12 @@ check: ## Run basic go tests with coverage output check: get-pgmonitor QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" $(GO_TEST) -cover ./... +# Informational only; no criteria to enforce at this time. +.PHONY: check-crd +check-crd: + $(foreach CRD, $(wildcard config/crd/bases/*.yaml), \ + $(CRD_CHECKER) check-manifests --new-crd-filename '$(CRD)' 2>&1 | awk -f hack/check-manifests.awk $(newline)) + # Available versions: curl -s 'https://storage.googleapis.com/kubebuilder-tools/' | grep -o '[^<]*' # - KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT=true .PHONY: check-envtest @@ -198,7 +206,7 @@ generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 16 generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 17 generate-kuttl: export KUTTL_PG_VERSION ?= 16 generate-kuttl: export KUTTL_POSTGIS_VERSION ?= 3.4 -generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520 +generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534 generate-kuttl: export KUTTL_TEST_DELETE_NAMESPACE ?= kuttl-test-delete-namespace generate-kuttl: ## Generate kuttl tests [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated @@ -253,3 +261,9 @@ generate-rbac: ## Generate RBAC ) rbac:roleName='postgres-operator' $(\ ) paths='./cmd/...' paths='./internal/...' 
$(\ ) output:dir='config/rbac' # {directory}/role.yaml + +# https://www.gnu.org/software/make/manual/make.html#Multi_002dLine +define newline + + +endef diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index dd321d5541..50ac74943d 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -34,7 +34,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/tracing" "github.com/crunchydata/postgres-operator/internal/upgradecheck" @@ -256,8 +255,8 @@ func main() { } // add all PostgreSQL Operator controllers to the runtime manager - addControllersToManager(manager, log, registrar) must(pgupgrade.ManagedReconciler(manager, registrar)) + must(postgrescluster.ManagedReconciler(manager, registrar)) must(standalone_pgadmin.ManagedReconciler(manager)) must(crunchybridgecluster.ManagedReconciler(manager, func() bridge.ClientInterface { return bridgeClient() @@ -306,19 +305,3 @@ func main() { log.Info("shutdown complete") } } - -// addControllersToManager adds all PostgreSQL Operator controllers to the provided controller -// runtime manager. -func addControllersToManager(mgr runtime.Manager, log logging.Logger, reg registration.Registration) { - pgReconciler := &postgrescluster.Reconciler{ - Client: mgr.GetClient(), - Owner: naming.ControllerPostgresCluster, - Recorder: mgr.GetEventRecorderFor(naming.ControllerPostgresCluster), - Registration: reg, - } - - if err := pgReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create PostgresCluster controller") - os.Exit(1) - } -} diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml index a49b7a52ee..5313e686e1 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -1,9 +1,8 @@ --- +# controller-gen.kubebuilder.io/version: v0.18.0 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 name: crunchybridgeclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -90,7 +89,7 @@ spec: - aws - azure - gcp - maxLength: 10 + maxLength: 5 type: string x-kubernetes-validations: - message: immutable @@ -199,6 +198,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. 
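A pattern that recurs throughout the regenerated CRDs above and below: with controller-gen v0.18.0, enum-typed strings also carry a `maxLength` equal to the longest allowed value (for example `azure` is 5 characters, `Unknown` is 7). A minimal schema fragment showing the shape of that pattern — illustrative only, not copied from any single CRD:

```yaml
# Illustrative fragment: maxLength matches the longest enum member.
provider:
  enum:
    - aws
    - azure
    - gcp
  maxLength: 5   # len("azure")
  type: string
status:
  enum:
    - "True"
    - "False"
    - Unknown
  maxLength: 7   # len("Unknown")
  type: string
```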
diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index 313fa590ad..b2c0301207 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -1,9 +1,8 @@ --- +# controller-gen.kubebuilder.io/version: v0.18.0 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 name: pgadmins.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -1610,7 +1609,7 @@ spec: - Always - Never - IfNotPresent - maxLength: 15 + maxLength: 12 type: string imagePullSecrets: description: |- @@ -2579,7 +2578,7 @@ spec: enum: - Administrator - User - maxLength: 15 + maxLength: 13 type: string username: description: |- @@ -2620,6 +2619,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -2633,10 +2663,14 @@ spec: read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -2694,6 +2728,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. @@ -2719,6 +2754,10 @@ spec: description: MajorVersion represents the major version of the running pgAdmin. type: integer + minorVersion: + description: MinorVersion represents the minor version of the running + pgAdmin. + type: string observedGeneration: description: observedGeneration represents the .metadata.generation on which the status was based. 
diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml index 2476377b23..2ae1594295 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -1,9 +1,8 @@ --- +# controller-gen.kubebuilder.io/version: v0.18.0 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 name: pgupgrades.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -956,7 +955,7 @@ spec: fromPostgresVersion: description: The major version of PostgreSQL before the upgrade. format: int32 - maximum: 17 + maximum: 18 minimum: 11 type: integer image: @@ -971,7 +970,7 @@ spec: - Always - Never - IfNotPresent - maxLength: 15 + maxLength: 12 type: string imagePullSecrets: description: |- @@ -1080,7 +1079,7 @@ spec: toPostgresVersion: description: The major version of PostgreSQL to be upgraded to. format: int32 - maximum: 17 + maximum: 18 minimum: 11 type: integer tolerations: @@ -1133,7 +1132,7 @@ spec: - Copy - CopyFileRange - Link - maxLength: 15 + maxLength: 13 type: string required: - fromPostgresVersion @@ -1195,6 +1194,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 5fe0db4d0a..921d1fc48a 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1,9 +1,8 @@ --- +# controller-gen.kubebuilder.io/version: v0.18.0 apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 name: postgresclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -151,7 +150,7 @@ spec: properties: configuration: description: |- - Projected volumes containing custom pgBackRest configuration. These files are mounted + Projected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.html @@ -1424,6 +1423,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in Backup Job Pods. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest backup Job pods. @@ -1559,6 +1566,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. 
Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -1572,10 +1610,15 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -1583,6 +1626,14 @@ spec: x-kubernetes-list-type: map type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in postgres instance pods. + properties: + path: + maxLength: 256 + type: string + type: object manual: description: Defines details for manual pgBackRest backup Jobs @@ -2551,6 +2602,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in the repo host pod. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest repo host pod. Changing this value @@ -2975,6 +3034,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -2988,10 +3078,15 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -4410,6 +4505,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -4423,10 +4549,15 @@ spec: otherwise read-write. Defaults to false. 
type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -4562,6 +4693,21 @@ spec: required: - repos type: object + x-kubernetes-validations: + - message: pgbackrest sidecar log path is restricted to an existing + additional volume + rule: '!self.?log.path.hasValue() || self.log.path.startsWith("/volumes/")' + - message: repo host log path is restricted to an existing additional + volume + rule: '!self.?repoHost.log.path.hasValue() || self.repoHost.volumes.additional.exists(x, + self.repoHost.log.path.startsWith("/volumes/"+x.name))' + - message: backup jobs log path is restricted to an existing additional + volume + rule: '!self.?jobs.log.path.hasValue() || self.jobs.volumes.additional.exists(x, + self.jobs.log.path.startsWith("/volumes/"+x.name))' + - message: pgbackrest log-path must be set via the various log.path + fields in the spec + rule: '!self.?global["log-path"].hasValue()' snapshots: description: VolumeSnapshot configuration properties: @@ -6797,6 +6943,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -6810,10 +6987,15 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -7913,6 +8095,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -7926,10 +8139,15 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -8035,7 +8253,7 @@ spec: - Always - Never - IfNotPresent - maxLength: 15 + maxLength: 12 type: string imagePullSecrets: description: |- @@ -11209,6 +11427,7 @@ spec: type: object type: array volumes: + description: Volumes to be added to the instance set. properties: additional: description: Additional pre-existing volumes to add to the @@ -11233,6 +11452,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -11246,10 +11496,14 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -11683,6 +11937,7 @@ spec: required: - dataVolumeClaimSpec type: object + maxItems: 16 minItems: 1 type: array x-kubernetes-list-map-keys: @@ -12915,7 +13170,7 @@ spec: - INFO - DEBUG - NOTSET - maxLength: 10 + maxLength: 8 type: string storageLimit: description: |- @@ -12960,7 +13215,7 @@ spec: enum: - Switchover - Failover - maxLength: 15 + maxLength: 10 type: string required: - enabled @@ -12994,7 +13249,8 @@ spec: postgresVersion: description: The major version of PostgreSQL installed in the PostgreSQL image - maximum: 17 + format: int32 + maximum: 18 minimum: 11 type: integer proxy: @@ -15874,14 +16130,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -15891,6 +16147,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -15899,6 +16156,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -15927,7 +16185,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object sidecars: @@ -16239,6 +16497,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. 
+ More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -16252,10 +16541,15 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -16281,14 +16575,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -16298,6 +16592,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -16306,6 +16601,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -16334,7 +16630,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object service: @@ -16346,14 +16642,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -16363,6 +16659,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -16371,6 +16668,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -16399,7 +16697,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object shutdown: @@ -18045,14 +18343,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -18062,6 +18360,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -18070,6 +18369,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -18098,7 +18398,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object tolerations: @@ -18381,7 +18681,7 @@ spec: enum: - ASCII - AlphaNumeric - maxLength: 15 + maxLength: 12 type: string required: - type @@ -18412,7 +18712,15 @@ spec: - fieldPath: .config.parameters.log_directory message: all instances need an additional volume to log in "/volumes" rule: self.?config.parameters.log_directory.optMap(v, type(v) != string - || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue())).orValue(true) + || !v.startsWith("/volumes") || 
self.instances.all(i, i.?volumes.additional.hasValue() + && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + + volume.name)))).orValue(true) + - fieldPath: .backups.pgbackrest.log.path + message: all instances need an additional volume for pgbackrest sidecar + to log in "/volumes" + rule: self.?backups.pgbackrest.log.path.optMap(v, !v.startsWith("/volumes") + || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, + v.startsWith("/volumes/" + volume.name)))).orValue(true) status: description: PostgresClusterStatus defines the observed state of PostgresCluster properties: @@ -18462,6 +18770,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. @@ -18754,6 +19063,7 @@ spec: description: |- Stores the current PostgreSQL major version following a successful major PostgreSQL upgrade. + format: int32 type: integer proxy: description: Current state of the PostgreSQL proxy. @@ -18948,7 +19258,7 @@ spec: properties: configuration: description: |- - Projected volumes containing custom pgBackRest configuration. These files are mounted + Projected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.html @@ -20221,6 +20531,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in Backup Job Pods. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest backup Job pods. @@ -20356,6 +20674,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -20369,10 +20718,15 @@ spec: otherwise read-write. Defaults to false. 
type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -20380,6 +20734,14 @@ spec: x-kubernetes-list-type: map type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in postgres instance pods. + properties: + path: + maxLength: 256 + type: string + type: object manual: description: Defines details for manual pgBackRest backup Jobs @@ -21348,6 +21710,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in the repo host pod. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest repo host pod. Changing this value @@ -21772,6 +22142,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -21785,10 +22186,15 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -23207,6 +23613,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. 
Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -23220,10 +23657,15 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) + || self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -25587,6 +26029,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -25600,10 +26073,15 @@ spec: otherwise read-write. Defaults to false. 
type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -26703,6 +27181,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -26716,10 +27225,15 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -26825,7 +27339,7 @@ spec: - Always - Never - IfNotPresent - maxLength: 15 + maxLength: 12 type: string imagePullSecrets: description: |- @@ -29999,6 +30513,7 @@ spec: type: object type: array volumes: + description: Volumes to be added to the instance set. properties: additional: description: Additional pre-existing volumes to add to the @@ -30023,6 +30538,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. 
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -30036,10 +30582,14 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) || self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -31705,7 +32255,7 @@ spec: - INFO - DEBUG - NOTSET - maxLength: 10 + maxLength: 8 type: string storageLimit: description: |- @@ -31750,7 +32300,7 @@ spec: enum: - Switchover - Failover - maxLength: 15 + maxLength: 10 type: string required: - enabled @@ -31784,7 +32334,8 @@ spec: postgresVersion: description: The major version of PostgreSQL installed in the PostgreSQL image - maximum: 17 + format: int32 + maximum: 18 minimum: 11 type: integer proxy: @@ -34664,14 +35215,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -34681,6 +35232,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -34689,6 +35241,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -34717,7 +35270,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object sidecars: @@ -35029,6 +35582,37 @@ spec: maxItems: 10 type: array x-kubernetes-list-type: set + image: + description: |- + Reference to an image or OCI artifact. + More info: https://kubernetes.io/docs/concepts/storage/volumes#image + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + enum: + - Always + - Never + - IfNotPresent + maxLength: 12 + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. 
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + minLength: 1 + type: string + required: + - reference + type: object name: description: |- The name of the directory in which to mount this volume. @@ -35042,10 +35626,15 @@ spec: otherwise read-write. Defaults to false. type: boolean required: - - claimName - name type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: you must set only one of image or claimName + rule: has(self.claimName) != has(self.image) + - message: image volumes must be readOnly + rule: '!has(self.image) || !has(self.readOnly) || + self.readOnly' maxItems: 10 type: array x-kubernetes-list-map-keys: @@ -35065,14 +35654,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -35082,6 +35671,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -35090,6 +35680,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -35118,7 +35709,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object service: @@ -35130,14 +35721,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -35147,6 +35738,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -35155,6 +35747,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -35183,7 +35776,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object shutdown: @@ -36829,14 +37422,14 @@ spec: enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string internalTrafficPolicy: description: 'More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies' enum: - Cluster - Local - maxLength: 10 + maxLength: 7 type: string ipFamilies: items: @@ -36846,6 +37439,7 @@ spec: enum: - IPv4 - IPv6 + maxLength: 4 type: string type: array ipFamilyPolicy: @@ -36854,6 +37448,7 @@ spec: - SingleStack - PreferDualStack - RequireDualStack + maxLength: 16 type: string metadata: description: Metadata contains metadata for custom resources @@ -36882,7 +37477,7 @@ spec: - ClusterIP - NodePort - LoadBalancer - maxLength: 15 + maxLength: 12 type: string type: object tolerations: @@ -37162,7 +37757,7 @@ spec: enum: - ASCII - AlphaNumeric - maxLength: 15 + maxLength: 12 type: string required: - type @@ -37228,6 +37823,7 @@ spec: - "True" - "False" - Unknown + maxLength: 7 type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. 
@@ -37520,6 +38116,7 @@ spec: description: |- Stores the current PostgreSQL major version following a successful major PostgreSQL upgrade. + format: int32 type: integer proxy: description: Current state of the PostgreSQL proxy. diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index fc86b653e1..75c3d4c521 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -23,27 +23,29 @@ spec: - name: CRUNCHY_DEBUG value: "true" - name: RELATED_IMAGE_POSTGRES_16 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.9-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.10-2534" - name: RELATED_IMAGE_POSTGRES_16_GIS_3.3 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.3-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.3-2534" - name: RELATED_IMAGE_POSTGRES_16_GIS_3.4 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.9-3.4-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-16.10-3.4-2534" - name: RELATED_IMAGE_POSTGRES_17 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534" - name: RELATED_IMAGE_POSTGRES_17_GIS_3.4 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.5-3.5-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.4-2534" + - name: RELATED_IMAGE_POSTGRES_17_GIS_3.5 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi9-17.6-3.5-2534" - name: RELATED_IMAGE_PGBACKREST - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.54.2-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2534" - name: RELATED_IMAGE_PGBOUNCER - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2534" - name: RELATED_IMAGE_PGEXPORTER - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.17.1-2534" - name: RELATED_IMAGE_PGUPGRADE - value: "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.5-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-17.6-2534" - name: RELATED_IMAGE_STANDALONE_PGADMIN - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2520" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.2-2534" - name: RELATED_IMAGE_COLLECTOR - value: "registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.2-0" + value: "registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.3-0" securityContext: allowPrivilegeEscalation: false capabilities: { drop: [ALL] } diff --git a/examples/postgrescluster/postgrescluster.yaml b/examples/postgrescluster/postgrescluster.yaml index 75756af94e..91cc31a524 100644 --- a/examples/postgrescluster/postgrescluster.yaml +++ b/examples/postgrescluster/postgrescluster.yaml @@ -3,7 +3,7 @@ kind: PostgresCluster metadata: name: hippo spec: - postgresVersion: 16 + postgresVersion: 17 instances: - name: instance1 
dataVolumeClaimSpec: diff --git a/go.mod b/go.mod index 287be117ff..74914ddeb7 100644 --- a/go.mod +++ b/go.mod @@ -10,20 +10,20 @@ require ( github.com/google/uuid v1.6.0 github.com/itchyny/gojq v0.12.17 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 - github.com/onsi/ginkgo/v2 v2.25.2 + github.com/onsi/ginkgo/v2 v2.25.3 github.com/onsi/gomega v1.38.2 github.com/pganalyze/pg_query_go/v6 v6.1.0 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/xdg-go/stringprep v1.0.4 - go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 - go.opentelemetry.io/contrib/propagators/autoprop v0.57.0 - go.opentelemetry.io/otel v1.33.0 - go.opentelemetry.io/otel/sdk v1.33.0 - go.opentelemetry.io/otel/trace v1.33.0 - golang.org/x/crypto v0.41.0 - golang.org/x/tools v0.36.0 + go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 + go.opentelemetry.io/contrib/propagators/autoprop v0.63.0 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + golang.org/x/crypto v0.42.0 + golang.org/x/tools v0.37.0 gotest.tools/v3 v3.5.2 k8s.io/api v0.33.4 k8s.io/apimachinery v0.33.4 @@ -37,12 +37,12 @@ require ( ) require ( - cel.dev/expr v0.19.1 // indirect + cel.dev/expr v0.24.0 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect @@ -63,7 +63,8 @@ require ( github.com/google/gnostic-models v0.6.9 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -76,64 +77,67 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/prometheus/client_golang v1.22.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_golang v1.23.0 // indirect + github.com/prometheus/client_model v0.6.2 // 
indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/otlptranslator v0.0.2 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/spf13/cobra v1.9.1 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect - go.opentelemetry.io/contrib/propagators/aws v1.32.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.32.0 // indirect - go.opentelemetry.io/contrib/propagators/jaeger v1.32.0 // indirect - go.opentelemetry.io/contrib/propagators/ot v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect - go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.33.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 // indirect + go.opentelemetry.io/contrib/propagators/aws v1.38.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.38.0 // indirect + go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 // indirect + go.opentelemetry.io/contrib/propagators/ot v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect + go.opentelemetry.io/otel/log v0.14.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect 
go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.27.0 // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/term v0.34.0 // indirect - golang.org/x/text v0.28.0 // indirect + golang.org/x/mod v0.28.0 // indirect + golang.org/x/net v0.44.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/term v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/grpc v1.68.1 // indirect - google.golang.org/protobuf v1.36.7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.33.0 // indirect k8s.io/apiserver v0.33.0 // indirect + k8s.io/code-generator v0.33.0 // indirect + k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect - sigs.k8s.io/controller-tools v0.17.3 // indirect + sigs.k8s.io/controller-tools v0.18.0 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/go.sum b/go.sum index d60a6185ff..e65172ea2e 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= -cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= @@ -10,8 +10,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= 
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -76,8 +76,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/itchyny/gojq v0.12.17 h1:8av8eGduDb5+rvEdaOO+zQUjA04MS0m3Ps8HiD+fceg= @@ -122,8 +124,8 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= -github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= +github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/pganalyze/pg_query_go/v6 v6.1.0 h1:jG5ZLhcVgL1FAw4C/0VNQaVmX1SUJx71wBGdtTtBvls= @@ -134,14 +136,16 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.22.0 
h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ= +github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -163,8 +167,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= @@ -174,60 +178,62 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/contrib/propagators/autoprop v0.57.0 h1:bNPJOdT5154XxzeFmrh8R+PXnV4t3TZEczy8gHEpcpg= -go.opentelemetry.io/contrib/propagators/autoprop v0.57.0/go.mod h1:Tb0j0mK+QatKdCxCKPN7CSzc7kx/q34/KaohJx/N96s= -go.opentelemetry.io/contrib/propagators/aws v1.32.0 h1:NELzr8bW7a7aHVZj5gaep1PfkvoSCGx+1qNGZx/uhhU= -go.opentelemetry.io/contrib/propagators/aws v1.32.0/go.mod h1:XKMrzHNka3eOA+nGEcNKYVL9s77TAhkwQEynYuaRFnQ= -go.opentelemetry.io/contrib/propagators/b3 v1.32.0 h1:MazJBz2Zf6HTN/nK/s3Ru1qme+VhWU5hm83QxEP+dvw= -go.opentelemetry.io/contrib/propagators/b3 v1.32.0/go.mod h1:B0s70QHYPrJwPOwD1o3V/R8vETNOG9N3qZf4LDYvA30= -go.opentelemetry.io/contrib/propagators/jaeger v1.32.0 h1:K/fOyTMD6GELKTIJBaJ9k3ppF2Njt8MeUGBOwfaWXXA= -go.opentelemetry.io/contrib/propagators/jaeger v1.32.0/go.mod h1:ISE6hda//MTWvtngG7p4et3OCngsrTVfl7c6DjN17f8= -go.opentelemetry.io/contrib/propagators/ot v1.32.0 h1:Poy02A4wOZubHyd2hpHPDgZW+rn6EIq0vCwTZJ6Lmu8= -go.opentelemetry.io/contrib/propagators/ot v1.32.0/go.mod h1:cbhaURV+VR3NIMarzDYZU1RDEkXG1fNd1WMP1XCcGkY= -go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= -go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= -go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= -go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= -go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= -go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= -go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= -go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= -go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= -go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= -go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 h1:/Rij/t18Y7rUayNg7Id6rPrEnHgorxYabm2E6wUdPP4= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0/go.mod h1:AdyDPn6pkbkt2w01n3BubRVk7xAsCRq1Yg1mpfyA/0E= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 h1:NLnZybb9KkfMXPwZhd5diBYJoVxiO9Qa06dacEA7ySY= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0/go.mod h1:OvRg7gm5WRSCtxzGSsrFHbDLToYlStHNZQ+iPNIyD6g= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/contrib/propagators/autoprop v0.63.0 h1:S3+4UwR3Y1tUKklruMwOacAFInNvtuOexz4ZTmJNAyw= +go.opentelemetry.io/contrib/propagators/autoprop v0.63.0/go.mod h1:qpIuOggbbw2T9nKRaO1je/oTRKd4zslAcJonN8LYbTg= +go.opentelemetry.io/contrib/propagators/aws v1.38.0 h1:eRZ7asSbLc5dH7+TBzL6hFKb1dabz0IV51uUUwYRZts= +go.opentelemetry.io/contrib/propagators/aws v1.38.0/go.mod h1:wXqc9NTGcXapBExHBDVLEZlByu6quiQL8w7Tjgv8TCg= 
+go.opentelemetry.io/contrib/propagators/b3 v1.38.0 h1:uHsCCOSKl0kLrV2dLkFK+8Ywk9iKa/fptkytc6aFFEo= +go.opentelemetry.io/contrib/propagators/b3 v1.38.0/go.mod h1:wMRSZJZcY8ya9mApLLhwIMjqmApy2o/Ml+62lhvxyHU= +go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 h1:nXGeLvT1QtCAhkASkP/ksjkTKZALIaQBIW+JSIw1KIc= +go.opentelemetry.io/contrib/propagators/jaeger v1.38.0/go.mod h1:oMvOXk78ZR3KEuPMBgp/ThAMDy9ku/eyUVztr+3G6Wo= +go.opentelemetry.io/contrib/propagators/ot v1.38.0 h1:k4gSyyohaDXI8F9BDXYC3uO2vr5sRNeQFMsN9Zn0EoI= +go.opentelemetry.io/contrib/propagators/ot v1.38.0/go.mod h1:2hDsuiHRO39SRUMhYGqmj64z/IuMRoxE4bBSFR82Lo8= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 h1:OMqPldHt79PqWKOMYIAQs3CxAi7RLgPxwfFSwr4ZxtM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0/go.mod h1:1biG4qiqTxKiUCtoWDPpL3fB3KxVwCiGw81j3nKMuHE= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 h1:QQqYw3lkrzwVsoEX0w//EhH/TCnpRdEenKBOOEIMjWc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0/go.mod h1:gSVQcr17jk2ig4jqJ2DX30IdWH251JcNAecvrqTxH1s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0/go.mod h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 h1:B/g+qde6Mkzxbry5ZZag0l7QrQBCtVm7lVjaLgmpje8= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0/go.mod h1:mOJK8eMmgW6ocDJn6Bn11CcZ05gi3P8GylBXEkZtbgA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= +go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= +go.opentelemetry.io/otel/log v0.14.0/go.mod 
h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg= +go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -244,31 +250,31 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -279,18 +285,18 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.28.0 
h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -298,8 +304,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -310,16 +316,18 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= -google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -346,8 +354,12 @@ k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= +k8s.io/code-generator v0.33.0 h1:B212FVl6EFqNmlgdOZYWNi77yBv+ed3QgQsMR8YQCw4= +k8s.io/code-generator v0.33.0/go.mod h1:KnJRokGxjvbBQkSJkbVuBbu6z4B0rC7ynkpY5Aw6m9o= k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog= +k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= @@ -358,8 +370,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUo sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= -sigs.k8s.io/controller-tools v0.17.3 h1:lwFPLicpBKLgIepah+c8ikRBubFW5kOQyT88r3EwfNw= -sigs.k8s.io/controller-tools v0.17.3/go.mod h1:1ii+oXcYZkxcBXzwv3YZBlzjt1fvkrCGjVF73blosJI= +sigs.k8s.io/controller-tools v0.18.0 h1:rGxGZCZTV2wJreeRgqVoWab/mfcumTMmSwKzoM9xrsE= +sigs.k8s.io/controller-tools v0.18.0/go.mod h1:gLKoiGBriyNh+x1rWtUQnakUYEujErjXs9pf+x/8n1U= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= diff --git a/hack/check-manifests.awk b/hack/check-manifests.awk new file mode 100644 index 0000000000..0e6e23ffff --- /dev/null +++ b/hack/check-manifests.awk @@ -0,0 +1,26 @@ +# Copyright 2025 Crunchy Data Solutions, Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +## TODO: Exit successfully only when there are no errors. +#/^ERROR:/ { rc = 1 } +#END { exit rc } + +# Shorten these frequent messages about validation rules. 
+/The maximum allowable value is 10000000[.]/ { + sub(/ The maximum allowable value is 10000000./, "") + sub(/ allowed budget/, "&, 10M") +} + +# These are informational, but "MustNot" sounds like something is wrong. +/^info: "MustNotExceedCostBudget"/ { + sub(/"MustNotExceedCostBudget"/, "\"CostBudget\"") +} + +# Color errors and warnings when attached to a terminal. +ENVIRON["MAKE_TERMOUT"] != "" { + sub(/^ERROR:/, "\033[0;31m&\033[0m") + sub(/^Warning:/, "\033[1;33m&\033[0m") +} + +{ print } diff --git a/hack/extract-licenses.go b/hack/extract-licenses.go new file mode 100644 index 0000000000..3f75e2fc2c --- /dev/null +++ b/hack/extract-licenses.go @@ -0,0 +1,238 @@ +// Copyright 2024 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "bytes" + "context" + "encoding/csv" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "io/fs" + "os" + "os/exec" + "os/signal" + "path/filepath" + "slices" + "strings" + "syscall" +) + +func main() { + flags := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + flags.Usage = func() { + fmt.Fprintln(flags.Output(), strings.TrimSpace(` +Usage: `+flags.Name()+` {directory} {executables...} + +This program downloads and extracts the licenses of Go modules used to build +Go executables. + +The first argument is a directory that will receive license files. It will be +created if it does not exist. This program will overwrite existing files but +not delete them. Remaining arguments must be Go executables. + +Go modules are downloaded to the Go module cache which can be changed via +the environment: https://go.dev/ref/mod#module-cache`, + )) + } + if _ = flags.Parse(os.Args[1:]); flags.NArg() < 2 || slices.ContainsFunc( + os.Args, func(arg string) bool { return arg == "-help" || arg == "--help" }, + ) { + flags.Usage() + os.Exit(2) + } + + ctx, cancel := context.WithCancel(context.Background()) + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt, syscall.SIGTERM) + go func() { <-signals; cancel() }() + + // Create the target directory. + if err := os.MkdirAll(flags.Arg(0), 0o755); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + // Extract module information from remaining arguments. + modules := identifyModules(ctx, flags.Args()[1:]...) + + // Ignore packages from Crunchy Data. Most are not available in any [proxy], + // and we handle their licenses elsewhere. + // + // This is also a quick fix to avoid the [replace] directive in our projects. + // The logic below cannot handle them. Showing xxhash versus a replace: + // + // dep github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= + // dep github.com/crunchydata/postgres-operator v0.0.0-00010101000000-000000000000 + // => ./postgres-operator (devel) + // + // [proxy]: https://go.dev/ref/mod#module-proxy + // [replace]: https://go.dev/ref/mod#go-mod-file-replace + modules = slices.DeleteFunc(modules, func(s string) bool { + return strings.HasPrefix(s, "github.com/crunchydata/") + }) + + // Download modules to the Go module cache. + directories := downloadModules(ctx, modules...) + + // Gather license files from every module into the target directory. 
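+	// For example, a license found in the cache of the hypothetical module + // "example.com/mod@v1.0.0" at ".../example.com/mod@v1.0.0/LICENSE" is + // written to "<target directory>/example.com/mod@v1.0.0/LICENSE".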
+ for module, directory := range directories { + for _, license := range findLicenses(directory) { + relative := module + strings.TrimPrefix(license, directory) + destination := filepath.Join(flags.Arg(0), relative) + + var data []byte + err := ctx.Err() + + if err == nil { + err = os.MkdirAll(filepath.Dir(destination), 0o755) + } + if err == nil { + data, err = os.ReadFile(license) + } + if err == nil { + // When we copy the licenses in the Dockerfiles, make sure + // to `--chmod` them to an appropriate permissions, e.g., 0o444 + err = os.WriteFile(destination, data, 0o600) + } + if err == nil { + fmt.Fprintln(os.Stdout, license, "=>", destination) + } + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } + } +} + +func downloadModules(ctx context.Context, modules ...string) map[string]string { + var stdout bytes.Buffer + + // Download modules and read their details into a series of JSON objects. + // - https://go.dev/ref/mod#go-mod-download + //gosec:disable G204 -- Use this environment variable to switch Go versions without touching PATH + cmd := exec.CommandContext(ctx, os.Getenv("GO"), append([]string{"mod", "download", "-json"}, modules...)...) + if cmd.Path == "" { + cmd.Path, cmd.Err = exec.LookPath("go") + } + cmd.Stderr = os.Stderr + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(cmd.ProcessState.ExitCode()) + } + + decoder := json.NewDecoder(&stdout) + results := make(map[string]string, len(modules)) + + // NOTE: The directory in the cache is a normalized spelling of the module path; + // ask Go for the directory; do not try to spell it yourself. + // - https://go.dev/ref/mod#module-cache + // - https://go.dev/ref/mod#module-path + for { + var module struct { + Path string `json:"path,omitempty"` + Version string `json:"version,omitempty"` + Dir string `json:"dir,omitempty"` + } + err := decoder.Decode(&module) + + if err == nil { + results[module.Path+"@"+module.Version] = module.Dir + continue + } + if errors.Is(err, io.EOF) { + break + } + + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + return results +} + +func findLicenses(directory string) []string { + var results []string + + // Syft maintains a list of license filenames that began as a list maintained by + // Go. We gather a similar list by matching on "copying" and "license" filenames. + // - https://pkg.go.dev/github.com/anchore/syft@v1.3.0/internal/licenses#FileNames + // + // Ignore Go files and anything in the special "testdata" directory. + // - https://go.dev/cmd/go + err := filepath.WalkDir(directory, func(path string, d fs.DirEntry, err error) error { + if d.IsDir() && d.Name() == "testdata" { + return fs.SkipDir + } + if d.IsDir() || strings.HasSuffix(path, ".go") { + return err + } + + lower := strings.ToLower(d.Name()) + if strings.Contains(lower, "copying") || strings.Contains(lower, "license") { + results = append(results, path) + } + + return err + }) + + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + return results +} + +func identifyModules(ctx context.Context, executables ...string) []string { + var stdout bytes.Buffer + + // Use `go version -m` to read the embedded module information as a text table. + // - https://go.dev/ref/mod#go-version-m + //gosec:disable G204 -- Use this environment variable to switch Go versions without touching PATH + cmd := exec.CommandContext(ctx, os.Getenv("GO"), append([]string{"version", "-m"}, executables...)...) 
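+	// When the GO environment variable is unset, cmd.Path comes back empty, + // so fall back to whichever "go" binary is found on PATH.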
+ if cmd.Path == "" { + cmd.Path, cmd.Err = exec.LookPath("go") + } + cmd.Stderr = os.Stderr + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(cmd.ProcessState.ExitCode()) + } + + // Parse the tab-separated table without checking row lengths + // and without enforcing strict quote mark rules. + reader := csv.NewReader(&stdout) + reader.Comma = '\t' + reader.FieldsPerRecord = -1 + reader.LazyQuotes = true + + lines, _ := reader.ReadAll() + result := make([]string, 0, len(lines)) + + for _, fields := range lines { + if len(fields) > 3 && fields[1] == "dep" { + result = append(result, fields[2]+"@"+fields[3]) + } + if len(fields) > 4 && fields[1] == "mod" && fields[4] != "" { + result = append(result, fields[2]+"@"+fields[3]) + } + } + + // The `go version -m` command returns no information for empty files, and it + // is possible for a Go executable to have no main module and no dependencies. + if len(result) == 0 { + fmt.Fprintf(os.Stderr, "no Go modules in %v\n", executables) + os.Exit(0) + } + + return result +} diff --git a/internal/bridge/crunchybridgecluster/apply.go b/internal/bridge/crunchybridgecluster/apply.go deleted file mode 100644 index 850920fa83..0000000000 --- a/internal/bridge/crunchybridgecluster/apply.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package crunchybridgecluster - -import ( - "context" - "reflect" - - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// apply sends an apply patch to object's endpoint in the Kubernetes API and -// updates object with any returned content. The fieldManager is set by -// r.Writer and the force parameter is true. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts -// -// NOTE: This function is duplicated from a version in the postgrescluster package -func (r *CrunchyBridgeClusterReconciler) apply(ctx context.Context, object client.Object) error { - // Generate an apply-patch by comparing the object to its zero value. - zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() - data, err := client.MergeFrom(zero.(client.Object)).Data(object) - apply := client.RawPatch(client.Apply.Type(), data) - - // Send the apply-patch with force=true. 
- if err == nil { - err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership) - } - - return err -} diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go index 98f3897c01..8a3280f512 100644 --- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go @@ -21,6 +21,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/bridge" "github.com/crunchydata/postgres-operator/internal/controller/runtime" @@ -50,8 +51,8 @@ type CrunchyBridgeClusterReconciler struct { } } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list,watch} -//+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,list,watch} +//+kubebuilder:rbac:groups="",resources="secrets",verbs={get,list,watch} // ManagedReconciler creates a [CrunchyBridgeClusterReconciler] and adds it to m. func ManagedReconciler(m ctrl.Manager, newClient func() bridge.ClientInterface) error { @@ -72,7 +73,7 @@ func ManagedReconciler(m ctrl.Manager, newClient func() bridge.ClientInterface) // Smarter: retry after a certain time for each cluster WatchesRawSource( runtime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request { var list v1beta1.CrunchyBridgeClusterList _ = reconciler.Reader.List(ctx, &list) return runtime.Requests(initialize.Pointers(list.Items...)...) @@ -82,11 +83,11 @@ func ManagedReconciler(m ctrl.Manager, newClient func() bridge.ClientInterface) // Watch secrets and filter for secrets mentioned by CrunchyBridgeClusters Watches( &corev1.Secret{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []reconcile.Request { return runtime.Requests(reconciler.findCrunchyBridgeClustersForSecret(ctx, client.ObjectKeyFromObject(secret))...) }), ). 
- Complete(reconciler) + Complete(reconcile.AsReconciler(kubernetes, reconciler)) } // The owner reference created by controllerutil.SetControllerReference blocks @@ -105,47 +106,32 @@ func (r *CrunchyBridgeClusterReconciler) setControllerReference( return controllerutil.SetControllerReference(owner, controlled, runtime.Scheme) } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,patch,update} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={patch,update} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/status",verbs={patch,update} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters/finalizers",verbs={patch,update} //+kubebuilder:rbac:groups="",resources="secrets",verbs={get} // Reconcile does the work to move the current state of the world toward the -// desired state described in a [v1beta1.CrunchyBridgeCluster] identified by req. -func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +// desired state described in crunchybridgecluster. +func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, crunchybridgecluster *v1beta1.CrunchyBridgeCluster) (ctrl.Result, error) { + var err error ctx, span := tracing.Start(ctx, "reconcile-crunchybridgecluster") log := logging.FromContext(ctx) defer span.End() - // Retrieve the crunchybridgecluster from the client cache, if it exists. A deferred - // function below will send any changes to its Status field. - // - // NOTE: No DeepCopy is necessary here because controller-runtime makes a - // copy before returning from its cache. - // - https://github.com/kubernetes-sigs/controller-runtime/issues/1235 - crunchybridgecluster := &v1beta1.CrunchyBridgeCluster{} - err := r.Reader.Get(ctx, req.NamespacedName, crunchybridgecluster) + // Write any changes to the crunchybridgecluster status on the way out. + before := crunchybridgecluster.DeepCopy() + defer func() { + if !equality.Semantic.DeepEqual(before.Status, crunchybridgecluster.Status) { + status := r.StatusWriter.Patch(ctx, crunchybridgecluster, client.MergeFrom(before)) - if err == nil { - // Write any changes to the crunchybridgecluster status on the way out. - before := crunchybridgecluster.DeepCopy() - defer func() { - if !equality.Semantic.DeepEqual(before.Status, crunchybridgecluster.Status) { - status := r.StatusWriter.Patch(ctx, crunchybridgecluster, client.MergeFrom(before)) - - if err == nil && status != nil { - err = status - } else if status != nil { - log.Error(status, "Patching CrunchyBridgeCluster status") - } + if err == nil && status != nil { + err = status + } else if status != nil { + log.Error(status, "Patching CrunchyBridgeCluster status") } - }() - } else { - // NotFound cannot be fixed by requeuing so ignore it. During background - // deletion, we receive delete events from crunchybridgecluster's dependents after - // crunchybridgecluster is deleted. 
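Editor's note: the Get call and NotFound handling removed in this hunk are now performed by controller-runtime's `reconcile.AsReconciler` adapter introduced in the `Complete(...)` change above, so the reconciler receives an already-fetched object. A minimal sketch of that wiring pattern, with illustrative names (not the exact wiring in this change):

```go
// Sketch only: the typed-reconciler pattern these controllers adopt. The
// adapter reads the object from the cache and discards NotFound errors
// before calling Reconcile.
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

type exampleReconciler struct{}

// Reconcile receives an object already read from the cache, so it needs no
// Get call and no IgnoreNotFound handling of its own.
func (exampleReconciler) Reconcile(
	ctx context.Context, cluster *v1beta1.CrunchyBridgeCluster,
) (reconcile.Result, error) {
	return reconcile.Result{}, nil
}

func wire(m manager.Manager, kubernetes client.Client) error {
	return builder.ControllerManagedBy(m).
		For(&v1beta1.CrunchyBridgeCluster{}).
		Complete(reconcile.AsReconciler(kubernetes, exampleReconciler{}))
}
```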
- return ctrl.Result{}, tracing.Escape(span, client.IgnoreNotFound(err)) - } + } + }() // Get and validate connection secret for requests key, team, err := r.reconcileBridgeConnectionSecret(ctx, crunchybridgecluster) diff --git a/internal/bridge/crunchybridgecluster/postgres.go b/internal/bridge/crunchybridgecluster/postgres.go index 0aa09517d5..f8b8bf6b12 100644 --- a/internal/bridge/crunchybridgecluster/postgres.go +++ b/internal/bridge/crunchybridgecluster/postgres.go @@ -15,6 +15,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -152,7 +153,7 @@ func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets( roleSecrets[roleName], err = r.generatePostgresRoleSecret(cluster, role, clusterRole) } if err == nil { - err = errors.WithStack(r.apply(ctx, roleSecrets[roleName])) + err = errors.WithStack(runtime.Apply(ctx, r.Writer, roleSecrets[roleName])) } if err != nil { log.Error(err, "Issue creating role secret.") diff --git a/internal/collector/pgbackrest.go b/internal/collector/pgbackrest.go index 75cc9a55c1..1d51a22181 100644 --- a/internal/collector/pgbackrest.go +++ b/internal/collector/pgbackrest.go @@ -8,7 +8,6 @@ import ( "context" _ "embed" "encoding/json" - "fmt" "slices" "github.com/crunchydata/postgres-operator/internal/naming" @@ -25,19 +24,12 @@ func NewConfigForPgBackrestRepoHostPod( ctx context.Context, spec *v1beta1.InstrumentationSpec, repos []v1beta1.PGBackRestRepo, + directory string, ) *Config { config := NewConfig(spec) if OpenTelemetryLogsEnabled(ctx, spec) { - var directory string - for _, repo := range repos { - if repo.Volume != nil { - directory = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) - break - } - } - // We should only enter this function if a PVC is assigned for a dedicated repohost // but if we don't have one, exit early. 
if directory == "" { diff --git a/internal/collector/pgbackrest_test.go b/internal/collector/pgbackrest_test.go index 2b26d40531..653b8b7806 100644 --- a/internal/collector/pgbackrest_test.go +++ b/internal/collector/pgbackrest_test.go @@ -30,8 +30,7 @@ func TestNewConfigForPgBackrestRepoHostPod(t *testing.T) { } var instrumentation *v1beta1.InstrumentationSpec require.UnmarshalInto(t, &instrumentation, `{}`) - - config := NewConfigForPgBackrestRepoHostPod(ctx, instrumentation, repos) + config := NewConfigForPgBackrestRepoHostPod(ctx, instrumentation, repos, "/test/directory") result, err := config.ToYAML() assert.NilError(t, err) @@ -43,7 +42,7 @@ exporters: extensions: file_storage/pgbackrest_logs: create_directory: false - directory: /pgbackrest/repo1/log/receiver + directory: /test/directory/receiver fsync: true processors: batch/1s: @@ -101,8 +100,8 @@ processors: receivers: filelog/pgbackrest_log: include: - - /pgbackrest/repo1/log/*.log - - /pgbackrest/repo1/log/*.log.1 + - /test/directory/*.log + - /test/directory/*.log.1 multiline: line_start_pattern: ^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}|^-{19} storage: file_storage/pgbackrest_logs @@ -136,8 +135,7 @@ service: Volume: new(v1beta1.RepoPVC), }, } - - config := NewConfigForPgBackrestRepoHostPod(ctx, testInstrumentationSpec(), repos) + config := NewConfigForPgBackrestRepoHostPod(ctx, testInstrumentationSpec(), repos, "/another/directory") result, err := config.ToYAML() assert.NilError(t, err) @@ -153,7 +151,7 @@ exporters: extensions: file_storage/pgbackrest_logs: create_directory: false - directory: /pgbackrest/repo1/log/receiver + directory: /another/directory/receiver fsync: true processors: batch/1s: @@ -211,8 +209,8 @@ processors: receivers: filelog/pgbackrest_log: include: - - /pgbackrest/repo1/log/*.log - - /pgbackrest/repo1/log/*.log.1 + - /another/directory/*.log + - /another/directory/*.log.1 multiline: line_start_pattern: ^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}|^-{19} storage: file_storage/pgbackrest_logs diff --git a/internal/collector/postgres.go b/internal/collector/postgres.go index b73ae91a25..a279be33ca 100644 --- a/internal/collector/postgres.go +++ b/internal/collector/postgres.go @@ -16,6 +16,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -91,7 +92,7 @@ func NewConfigForPostgresPod(ctx context.Context, var postgresLogsTransforms json.RawMessage // postgresCSVNames returns the names of fields in the CSV logs for version. -func postgresCSVNames(version int) string { +func postgresCSVNames(version int32) string { // JSON is the preferred format, so use those names. // https://www.postgresql.org/docs/current/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-JSONLOG @@ -244,8 +245,9 @@ func EnablePostgresLogging( } // pgBackRest pipeline + pgBackRestLogPath := util.GetPGBackRestLogPathForInstance(inCluster) outConfig.Extensions["file_storage/pgbackrest_logs"] = map[string]any{ - "directory": naming.PGBackRestPGDataLogPath + "/receiver", + "directory": pgBackRestLogPath + "/receiver", "create_directory": false, "fsync": true, } @@ -258,8 +260,8 @@ func EnablePostgresLogging( // a log record or two to the old file while rotation is occurring. // The collector knows not to create duplicate logs. 
"include": []string{ - naming.PGBackRestPGDataLogPath + "/*.log", - naming.PGBackRestPGDataLogPath + "/*.log.1", + pgBackRestLogPath + "/*.log", + pgBackRestLogPath + "/*.log.1", }, "storage": "file_storage/pgbackrest_logs", diff --git a/internal/controller/pgupgrade/apply.go b/internal/controller/pgupgrade/apply.go deleted file mode 100644 index c3e869eba6..0000000000 --- a/internal/controller/pgupgrade/apply.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package pgupgrade - -import ( - "context" - "reflect" - - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// apply sends an apply patch to object's endpoint in the Kubernetes API and -// updates object with any returned content. The fieldManager is set by -// r.Writer and the force parameter is true. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts -func (r *PGUpgradeReconciler) apply(ctx context.Context, object client.Object) error { - // Generate an apply-patch by comparing the object to its zero value. - zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() - data, err := client.MergeFrom(zero.(client.Object)).Data(object) - apply := client.RawPatch(client.Apply.Type(), data) - - // Send the apply-patch with force=true. - if err == nil { - err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership) - } - - return err -} diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index 4715c8da93..6f6379b8e4 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -21,11 +21,11 @@ import ( "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/shell" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// Upgrade job - // pgUpgradeJob returns the ObjectMeta for the pg_upgrade Job utilized to // upgrade from one major PostgreSQL version to another func pgUpgradeJob(upgrade *v1beta1.PGUpgrade) metav1.ObjectMeta { @@ -48,20 +48,24 @@ func upgradeCommand(spec *v1beta1.PGUpgradeSettings, fetchKeyCommand string) []s oldVersion := spec.FromPostgresVersion newVersion := spec.ToPostgresVersion - // if the fetch key command is set for TDE, provide the value during initialization - initdb := `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}"` + var argEncryptionKeyCommand string if fetchKeyCommand != "" { - initdb += ` --encryption-key-command "` + fetchKeyCommand + `"` + argEncryptionKeyCommand = ` --encryption-key-command=` + shell.QuoteWord(fetchKeyCommand) } args := []string{fmt.Sprint(oldVersion), fmt.Sprint(newVersion)} script := strings.Join([]string{ + // Exit immediately when a pipeline or subshell exits non-zero or when expanding an unset variable. 
+ `shopt -so errexit nounset`, + `declare -r data_volume='/pgdata' old_version="$1" new_version="$2"`, - `printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n\n' "$@"`, + `printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n' "$@"`, + `section() { printf '\n\n%s\n' "$@"; }`, - // Note: Rather than import the nss_wrapper init container, as we do in - // the main postgres-operator, this job does the required nss_wrapper + // NOTE: Rather than import the nss_wrapper init container, as we do in + // the PostgresCluster controller, this job does the required nss_wrapper // settings here. + `section 'Step 1 of 7: Ensuring username is postgres...'`, // Create a copy of the system group definitions, but remove the "postgres" // group or any group with the current GID. Replace them with our own that @@ -80,53 +84,91 @@ func upgradeCommand(spec *v1beta1.PGUpgradeSettings, fetchKeyCommand string) []s // Enable nss_wrapper so the current UID and GID resolve to "postgres". // - https://cwrap.org/nss_wrapper.html `export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD`, + `id; [[ "$(id -nu)" == 'postgres' && "$(id -ng)" == 'postgres' ]]`, + + `section 'Step 2 of 7: Finding data and tools...'`, + `old_data="${data_volume}/pg${old_version}" && [[ -d "${old_data}" ]]`, + `new_data="${data_volume}/pg${new_version}"`, + + // Search for Postgres executables matching the old and new versions. + // Use `command -v` to look through all of PATH, then trim the executable name from the absolute path. + `old_bin=$(` + postgres.ShellPath(oldVersion) + ` && command -v postgres)`, + `old_bin="${old_bin%/postgres}"`, + `new_bin=$(` + postgres.ShellPath(newVersion) + ` && command -v pg_upgrade)`, + `new_bin="${new_bin%/pg_upgrade}"`, + + // The executables found might not be the versions we need, so do a cursory check before writing to disk. + // pg_upgrade checks every executable thoroughly since PostgreSQL v14. + // + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_10_0;f=src/bin/pg_upgrade/exec.c#l355 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_14_0;f=src/bin/pg_upgrade/exec.c#l358 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_18_0;f=src/bin/pg_upgrade/exec.c#l370 + `(set -x && [[ "$("${old_bin}/postgres" --version)" =~ ") ${old_version}"($|[^0-9]) ]])`, + `(set -x && [[ "$("${new_bin}/initdb" --version)" =~ ") ${new_version}"($|[^0-9]) ]])`, + + // pg_upgrade writes its files in "${new_data}/pg_upgrade_output.d" since PostgreSQL v15. + // Change to a writable working directory to be compatible with PostgreSQL v14 and earlier. + // + // https://www.postgresql.org/docs/release/15#id-1.11.6.20.5.11.3 + `cd "${data_volume}"`, // Below is the pg_upgrade script used to upgrade a PostgresCluster from // one major version to another. Additional information concerning the // steps used and command flag specifics can be found in the documentation: // - https://www.postgresql.org/docs/current/pgupgrade.html - // To begin, we first move to the mounted /pgdata directory and create a - // new version directory which is then initialized with the initdb command. 
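Editor's note: two helpers used in the new script above, `shell.QuoteWord` and `postgres.ShellPath`, are defined outside this diff. Rough sketches of their apparent behavior, inferred from the call sites here and from the expanded expectations in jobs_test.go further down; the signatures and the quote-escaping rule are assumptions:

```go
// Sketches only: inferred behavior of two helpers referenced by the new
// upgrade script; the real implementations live in internal/shell and
// internal/postgres and may differ in detail.
package example

import (
	"fmt"
	"strings"
)

// QuoteWord wraps a value in single quotes so it is passed as one shell word,
// matching the `--encryption-key-command='echo testKey'` expectation in the
// updated tests. The escaping of embedded single quotes is an assumption.
func QuoteWord(value string) string {
	return `'` + strings.ReplaceAll(value, `'`, `'"'"'`) + `'`
}

// ShellPath builds a PATH assignment preferring the bin directories of one
// PostgreSQL major version, matching the expansion visible in jobs_test.go,
// e.g. PATH="/usr/lib/postgresql/19/bin:...${PATH+:${PATH}}".
// The parameter type is assumed.
func ShellPath(version int32) string {
	return fmt.Sprintf(
		`PATH="/usr/lib/postgresql/%d/bin:/usr/libexec/postgresql%d:/usr/pgsql-%d/bin${PATH+:${PATH}}"`,
		version, version, version)
}
```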
- `cd /pgdata || exit`, - `echo -e "Step 1: Making new pgdata directory...\n"`, - `mkdir /pgdata/pg"${new_version}"`, - `echo -e "Step 2: Initializing new pgdata directory...\n"`, - initdb, - - // Before running the upgrade check, which ensures the clusters are compatible, - // proper permissions have to be set on the old pgdata directory and the - // preload library settings must be copied over. - `echo -e "\nStep 3: Setting the expected permissions on the old pgdata directory...\n"`, - `chmod 750 /pgdata/pg"${old_version}"`, - `echo -e "Step 4: Copying shared_preload_libraries setting to new postgresql.conf file...\n"`, - `echo "shared_preload_libraries = '$(/usr/pgsql-"""${old_version}"""/bin/postgres -D \`, - `/pgdata/pg"""${old_version}""" -C shared_preload_libraries)'" >> /pgdata/pg"${new_version}"/postgresql.conf`, - - // Before the actual upgrade is run, we will run the upgrade --check to - // verify everything before actually changing any data. - `echo -e "Step 5: Running pg_upgrade check...\n"`, - `time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \`, - `--new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}"\`, - ` --new-datadir /pgdata/pg"${new_version}" --check` + argMethod + argJobs, - - // Assuming the check completes successfully, the pg_upgrade command will - // be run that actually prepares the upgraded pgdata directory. - `echo -e "\nStep 6: Running pg_upgrade...\n"`, - `time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \`, - `--new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}" \`, - `--new-datadir /pgdata/pg"${new_version}"` + argMethod + argJobs, - - // Since we have cleared the Patroni cluster step by removing the EndPoints, we copy patroni.dynamic.json - // from the old data dir to help retain PostgreSQL parameters you had set before. - // - https://patroni.readthedocs.io/en/latest/existing_data.html#major-upgrade-of-postgresql-version - `echo -e "\nStep 7: Copying patroni.dynamic.json...\n"`, - `cp /pgdata/pg"${old_version}"/patroni.dynamic.json /pgdata/pg"${new_version}"`, - - `echo -e "\npg_upgrade Job Complete!"`, + // Examine the old data directory. + `control=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/pg_controldata")`, + `read -r checksums <<< "${control##*page checksum version:}"`, + + // Data checksums on the old and new data directories must match. + // Configuring these checksums depends on the version of initdb: + // + // - PostgreSQL v17 and earlier: disabled by default, enable with "--data-checksums" + // - PostgreSQL v18: enabled by default, enable with "--data-checksums", disable with "--no-data-checksums" + // + // https://www.postgresql.org/docs/release/18#RELEASE-18-MIGRATION + // + // Data page checksum version zero means checksums are disabled. + // Produce an initdb argument that enables or disables data checksums. 
+ // + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_11_0;f=src/bin/pg_verify_checksums/pg_verify_checksums.c#l303 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_12_0;f=src/bin/pg_checksums/pg_checksums.c#l523 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_18_0;f=src/bin/pg_checksums/pg_checksums.c#l571 + `checksums=$(if [[ "${checksums}" -gt 0 ]]; then echo '--data-checksums'; elif [[ "${new_version}" -ge 18 ]]; then echo '--no-data-checksums'; fi)`, + + `section 'Step 3 of 7: Initializing new data directory...'`, + `PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access ${checksums}` + argEncryptionKeyCommand, + + // Read the configured value then quote it; every single-quote U+0027 is replaced by two. + // + // https://www.postgresql.org/docs/current/config-setting.html + // https://www.gnu.org/software/bash/manual/bash.html#ANSI_002dC-Quoting + `section 'Step 4 of 7: Copying shared_preload_libraries parameter...'`, + `value=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/postgres" -C shared_preload_libraries)`, + `echo >> "${new_data}/postgresql.conf" "shared_preload_libraries = '${value//$'\''/$'\'\''}'"`, + + // NOTE: The default for --new-bindir is the directory of pg_upgrade since PostgreSQL v13. + // + // https://www.postgresql.org/docs/release/13#id-1.11.6.28.5.11 + `section 'Step 5 of 7: Checking for potential issues...'`, + `"${new_bin}/pg_upgrade" --check` + argMethod + argJobs + ` \`, + `--old-bindir="${old_bin}" --old-datadir="${old_data}" \`, + `--new-bindir="${new_bin}" --new-datadir="${new_data}"`, + + `section 'Step 6 of 7: Performing upgrade...'`, + `(set -x && time "${new_bin}/pg_upgrade"` + argMethod + argJobs + ` \`, + `--old-bindir="${old_bin}" --old-datadir="${old_data}" \`, + `--new-bindir="${new_bin}" --new-datadir="${new_data}")`, + + // https://patroni.readthedocs.io/en/latest/existing_data.html#major-upgrade-of-postgresql-version + `section 'Step 7 of 7: Copying Patroni settings...'`, + `(set -x && cp "${old_data}/patroni.dynamic.json" "${new_data}")`, + + `section 'Success!'`, }, "\n") - return append([]string{"bash", "-ceu", "--", script, "upgrade"}, args...) + return append([]string{"bash", "-c", "--", script, "upgrade"}, args...) } // largestWholeCPU returns the maximum CPU request or limit as a non-negative @@ -232,38 +274,37 @@ func (r *PGUpgradeReconciler) generateUpgradeJob( // We currently target the `pgdata/pg{old_version}` and `pgdata/pg{old_version}_wal` // directories for removal. func removeDataCommand(upgrade *v1beta1.PGUpgrade) []string { - oldVersion := fmt.Sprint(upgrade.Spec.FromPostgresVersion) + oldVersion := upgrade.Spec.FromPostgresVersion // Before removing the directories (both data and wal), we check that // the directory is not in use by running `pg_controldata` and making sure // the server state is "shut down in recovery" - // TODO(benjaminjb): pg_controldata seems pretty stable, but might want to - // experiment with a few more versions. - args := []string{oldVersion} + args := []string{fmt.Sprint(oldVersion)} script := strings.Join([]string{ - `declare -r old_version="$1"`, - `printf 'Removing PostgreSQL data dir for pg%s...\n\n' "$@"`, - `echo -e "Checking the directory exists and isn't being used...\n"`, - `cd /pgdata || exit`, - // The string `shut down in recovery` is the dbstate that postgres sets from - // at least version 10 to 14 when a replica has been shut down. 
- // - https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/bin/pg_controldata/pg_controldata.c;h=f911f98d946d83f1191abf35239d9b4455c5f52a;hb=HEAD#l59 - // Note: `pg_controldata` is actually used by `pg_upgrade` before upgrading - // to make sure that the server in question is shut down as a primary; - // that aligns with our use here, where we're making sure that the server in question - // was shut down as a replica. - // - https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/bin/pg_upgrade/controldata.c;h=41b8f69b8cbe4f40e6098ad84c2e8e987e24edaf;hb=HEAD#l122 - `if [ "$(/usr/pgsql-"${old_version}"/bin/pg_controldata /pgdata/pg"${old_version}" | grep -c "shut down in recovery")" -ne 1 ]; then echo -e "Directory in use, cannot remove..."; exit 1; fi`, - `echo -e "Removing old pgdata directory...\n"`, - // When deleting the wal directory, use `realpath` to resolve the symlink from - // the pgdata directory. This is necessary because the wal directory can be - // mounted at different places depending on if an external wal PVC is used, - // i.e. `/pgdata/pg14_wal` vs `/pgwal/pg14_wal` - `rm -rf /pgdata/pg"${old_version}" "$(realpath /pgdata/pg${old_version}/pg_wal)"`, - `echo -e "Remove Data Job Complete!"`, + // Exit immediately when a pipeline or subshell exits non-zero or when expanding an unset variable. + `shopt -so errexit nounset`, + + `declare -r data_volume='/pgdata' old_version="$1"`, + `printf 'Removing PostgreSQL %s data...\n\n' "$@"`, + `delete() (set -x && rm -rf -- "$@")`, + + `old_data="${data_volume}/pg${old_version}"`, + `control=$(` + postgres.ShellPath(oldVersion) + ` && LC_ALL=C pg_controldata "${old_data}")`, + `read -r state <<< "${control##*cluster state:}"`, + + // We expect exactly one state for a replica that has been stopped. + // + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_10_0;f=src/bin/pg_controldata/pg_controldata.c#l55 + // https://git.postgresql.org/gitweb/?p=postgresql.git;hb=refs/tags/REL_17_0;f=src/bin/pg_controldata/pg_controldata.c#l58 + `[[ "${state}" == 'shut down in recovery' ]] || { printf >&2 'Unexpected state! %q\n' "${state}"; exit 1; }`, + + // "rm" does not follow symbolic links. + // Delete the old data directory after subdirectories that contain versioned data. + `delete "${old_data}/pg_wal/"`, + `delete "${old_data}" && echo 'Success!'`, }, "\n") - return append([]string{"bash", "-ceu", "--", script, "remove"}, args...) + return append([]string{"bash", "-c", "--", script, "remove"}, args...) 
} // generateRemoveDataJob returns a Job that can remove the data diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go index a94641d4c6..cd96a4a3ef 100644 --- a/internal/controller/pgupgrade/jobs_test.go +++ b/internal/controller/pgupgrade/jobs_test.go @@ -87,7 +87,7 @@ func TestUpgradeCommand(t *testing.T) { spec := &v1beta1.PGUpgradeSettings{Jobs: tt.Spec} command := upgradeCommand(spec, "") assert.Assert(t, len(command) > 3) - assert.DeepEqual(t, []string{"bash", "-ceu", "--"}, command[:3]) + assert.DeepEqual(t, []string{"bash", "-c", "--"}, command[:3]) script := command[3] assert.Assert(t, cmp.Contains(script, tt.Args)) @@ -111,7 +111,7 @@ func TestUpgradeCommand(t *testing.T) { spec := &v1beta1.PGUpgradeSettings{TransferMethod: tt.Spec} command := upgradeCommand(spec, "") assert.Assert(t, len(command) > 3) - assert.DeepEqual(t, []string{"bash", "-ceu", "--"}, command[:3]) + assert.DeepEqual(t, []string{"bash", "-c", "--"}, command[:3]) script := command[3] assert.Assert(t, cmp.Contains(script, tt.Args)) @@ -196,11 +196,14 @@ spec: containers: - command: - bash - - -ceu + - -c - -- - |- + shopt -so errexit nounset declare -r data_volume='/pgdata' old_version="$1" new_version="$2" - printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n\n' "$@" + printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n' "$@" + section() { printf '\n\n%s\n' "$@"; } + section 'Step 1 of 7: Ensuring username is postgres...' gid=$(id -G); NSS_WRAPPER_GROUP=$(mktemp) (sed "/^postgres:x:/ d; /^[^:]*:x:${gid%% *}:/ d" /etc/group echo "postgres:x:${gid%% *}:") > "${NSS_WRAPPER_GROUP}" @@ -208,27 +211,36 @@ spec: (sed "/^postgres:x:/ d; /^[^:]*:x:${uid}:/ d" /etc/passwd echo "postgres:x:${uid}:${gid%% *}::${data_volume}:") > "${NSS_WRAPPER_PASSWD}" export LD_PRELOAD='libnss_wrapper.so' NSS_WRAPPER_GROUP NSS_WRAPPER_PASSWD - cd /pgdata || exit - echo -e "Step 1: Making new pgdata directory...\n" - mkdir /pgdata/pg"${new_version}" - echo -e "Step 2: Initializing new pgdata directory...\n" - /usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}" - echo -e "\nStep 3: Setting the expected permissions on the old pgdata directory...\n" - chmod 750 /pgdata/pg"${old_version}" - echo -e "Step 4: Copying shared_preload_libraries setting to new postgresql.conf file...\n" - echo "shared_preload_libraries = '$(/usr/pgsql-"""${old_version}"""/bin/postgres -D \ - /pgdata/pg"""${old_version}""" -C shared_preload_libraries)'" >> /pgdata/pg"${new_version}"/postgresql.conf - echo -e "Step 5: Running pg_upgrade check...\n" - time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \ - --new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}"\ - --new-datadir /pgdata/pg"${new_version}" --check --link --jobs=1 - echo -e "\nStep 6: Running pg_upgrade...\n" - time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \ - --new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}" \ - --new-datadir /pgdata/pg"${new_version}" --link --jobs=1 - echo -e "\nStep 7: Copying patroni.dynamic.json...\n" - cp /pgdata/pg"${old_version}"/patroni.dynamic.json /pgdata/pg"${new_version}" - echo -e "\npg_upgrade Job Complete!" + id; [[ "$(id -nu)" == 'postgres' && "$(id -ng)" == 'postgres' ]] + section 'Step 2 of 7: Finding data and tools...' 
+ old_data="${data_volume}/pg${old_version}" && [[ -d "${old_data}" ]] + new_data="${data_volume}/pg${new_version}" + old_bin=$(PATH="/usr/lib/postgresql/19/bin:/usr/libexec/postgresql19:/usr/pgsql-19/bin${PATH+:${PATH}}" && command -v postgres) + old_bin="${old_bin%/postgres}" + new_bin=$(PATH="/usr/lib/postgresql/25/bin:/usr/libexec/postgresql25:/usr/pgsql-25/bin${PATH+:${PATH}}" && command -v pg_upgrade) + new_bin="${new_bin%/pg_upgrade}" + (set -x && [[ "$("${old_bin}/postgres" --version)" =~ ") ${old_version}"($|[^0-9]) ]]) + (set -x && [[ "$("${new_bin}/initdb" --version)" =~ ") ${new_version}"($|[^0-9]) ]]) + cd "${data_volume}" + control=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/pg_controldata") + read -r checksums <<< "${control##*page checksum version:}" + checksums=$(if [[ "${checksums}" -gt 0 ]]; then echo '--data-checksums'; elif [[ "${new_version}" -ge 18 ]]; then echo '--no-data-checksums'; fi) + section 'Step 3 of 7: Initializing new data directory...' + PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access ${checksums} + section 'Step 4 of 7: Copying shared_preload_libraries parameter...' + value=$(LC_ALL=C PGDATA="${old_data}" "${old_bin}/postgres" -C shared_preload_libraries) + echo >> "${new_data}/postgresql.conf" "shared_preload_libraries = '${value//$'\''/$'\'\''}'" + section 'Step 5 of 7: Checking for potential issues...' + "${new_bin}/pg_upgrade" --check --link --jobs=1 \ + --old-bindir="${old_bin}" --old-datadir="${old_data}" \ + --new-bindir="${new_bin}" --new-datadir="${new_data}" + section 'Step 6 of 7: Performing upgrade...' + (set -x && time "${new_bin}/pg_upgrade" --link --jobs=1 \ + --old-bindir="${old_bin}" --old-datadir="${old_data}" \ + --new-bindir="${new_bin}" --new-datadir="${new_data}") + section 'Step 7 of 7: Copying Patroni settings...' + (set -x && cp "${old_data}/patroni.dynamic.json" "${new_data}") + section 'Success!' - upgrade - "19" - "25" @@ -263,7 +275,7 @@ status: {} tdeJob := reconciler.generateUpgradeJob(ctx, upgrade, startup, "echo testKey") assert.Assert(t, cmp.MarshalContains(tdeJob, - `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}" --encryption-key-command "echo testKey"`)) + `PGDATA="${new_data}" "${new_bin}/initdb" --allow-group-access ${checksums} --encryption-key-command='echo testKey'`)) } func TestGenerateRemoveDataJob(t *testing.T) { @@ -339,17 +351,19 @@ spec: containers: - command: - bash - - -ceu + - -c - -- - |- - declare -r old_version="$1" - printf 'Removing PostgreSQL data dir for pg%s...\n\n' "$@" - echo -e "Checking the directory exists and isn't being used...\n" - cd /pgdata || exit - if [ "$(/usr/pgsql-"${old_version}"/bin/pg_controldata /pgdata/pg"${old_version}" | grep -c "shut down in recovery")" -ne 1 ]; then echo -e "Directory in use, cannot remove..."; exit 1; fi - echo -e "Removing old pgdata directory...\n" - rm -rf /pgdata/pg"${old_version}" "$(realpath /pgdata/pg${old_version}/pg_wal)" - echo -e "Remove Data Job Complete!" + shopt -so errexit nounset + declare -r data_volume='/pgdata' old_version="$1" + printf 'Removing PostgreSQL %s data...\n\n' "$@" + delete() (set -x && rm -rf -- "$@") + old_data="${data_volume}/pg${old_version}" + control=$(PATH="/usr/lib/postgresql/19/bin:/usr/libexec/postgresql19:/usr/pgsql-19/bin${PATH+:${PATH}}" && LC_ALL=C pg_controldata "${old_data}") + read -r state <<< "${control##*cluster state:}" + [[ "${state}" == 'shut down in recovery' ]] || { printf >&2 'Unexpected state! 
%q\n' "${state}"; exit 1; } + delete "${old_data}/pg_wal/" + delete "${old_data}" && echo 'Success!' - remove - "19" image: img4 diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index 653ea9e55e..61eb39a7c8 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -17,6 +17,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/controller/runtime" @@ -49,9 +50,9 @@ type PGUpgradeReconciler struct { } } -//+kubebuilder:rbac:groups="batch",resources="jobs",verbs={list,watch} -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={list,watch} -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} +//+kubebuilder:rbac:groups="batch",resources="jobs",verbs={get,list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={get,list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch} // ManagedReconciler creates a [PGUpgradeReconciler] and adds it to m. func ManagedReconciler(m ctrl.Manager, r registration.Registration) error { @@ -71,11 +72,11 @@ func ManagedReconciler(m ctrl.Manager, r registration.Registration) error { Owns(&batchv1.Job{}). Watches( v1beta1.NewPostgresCluster(), - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []reconcile.Request { return runtime.Requests(reconciler.findUpgradesForPostgresCluster(ctx, client.ObjectKeyFromObject(cluster))...) }), ). - Complete(reconciler) + Complete(reconcile.AsReconciler(kubernetes, reconciler)) } //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={list} @@ -103,7 +104,6 @@ func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster( return matching } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={get} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades/status",verbs={patch} //+kubebuilder:rbac:groups="batch",resources="jobs",verbs={delete} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get} @@ -114,42 +114,26 @@ func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster( //+kubebuilder:rbac:groups="",resources="endpoints",verbs={delete} // Reconcile does the work to move the current state of the world toward the -// desired state described in a [v1beta1.PGUpgrade] identified by req. -func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, err error) { +// desired state described in upgrade. +func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, upgrade *v1beta1.PGUpgrade) (result ctrl.Result, err error) { ctx, span := tracing.Start(ctx, "reconcile-pgupgrade") log := logging.FromContext(ctx) defer span.End() defer func(s tracing.Span) { _ = tracing.Escape(s, err) }(span) - // Retrieve the upgrade from the client cache, if it exists. 
A deferred - // function below will send any changes to its Status field. - // - // NOTE: No DeepCopy is necessary here because controller-runtime makes a - // copy before returning from its cache. - // - https://github.com/kubernetes-sigs/controller-runtime/issues/1235 - upgrade := &v1beta1.PGUpgrade{} - err = r.Reader.Get(ctx, req.NamespacedName, upgrade) - - if err == nil { - // Write any changes to the upgrade status on the way out. - before := upgrade.DeepCopy() - defer func() { - if !equality.Semantic.DeepEqual(before.Status, upgrade.Status) { - status := r.StatusWriter.Patch(ctx, upgrade, client.MergeFrom(before)) - - if err == nil && status != nil { - err = status - } else if status != nil { - log.Error(status, "Patching PGUpgrade status") - } + // Write any changes to the upgrade status on the way out. + before := upgrade.DeepCopy() + defer func() { + if !equality.Semantic.DeepEqual(before.Status, upgrade.Status) { + status := r.StatusWriter.Patch(ctx, upgrade, client.MergeFrom(before)) + + if err == nil && status != nil { + err = status + } else if status != nil { + log.Error(status, "Patching PGUpgrade status") } - }() - } else { - // NotFound cannot be fixed by requeuing so ignore it. During background - // deletion, we receive delete events from upgrade's dependents after - // upgrade is deleted. - return ctrl.Result{}, client.IgnoreNotFound(err) - } + } + }() // Validate the remainder of the upgrade specification. These can likely // move to CEL rules or a webhook when supported. @@ -440,7 +424,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // Set the cluster status when we know the upgrade has completed successfully. // This will serve to help the user see that the upgrade has completed if they // are only watching the PostgresCluster - patch.Status.PostgresVersion = int(upgrade.Spec.ToPostgresVersion) + patch.Status.PostgresVersion = upgrade.Spec.ToPostgresVersion // Set the pgBackRest status for bootstrapping patch.Status.PGBackRest.Repos = []v1beta1.RepoStatus{} @@ -453,7 +437,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // TODO: error from apply could mean that the job exists with a different spec. 
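Editor's note: the `runtime.Apply` calls just below consolidate the per-controller `apply` methods deleted earlier in this change (internal/bridge/crunchybridgecluster/apply.go and internal/controller/pgupgrade/apply.go). A minimal sketch of such a shared helper, mirroring the deleted logic; the exact signature in internal/controller/runtime is inferred from the call sites:

```go
// Sketch only: a shared server-side-apply helper along the lines of the
// per-controller apply methods deleted in this change; not the exact source
// of internal/controller/runtime.
package example

import (
	"context"
	"reflect"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// patcher is the narrow interface the reconcilers' Writer fields satisfy.
type patcher interface {
	Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error
}

// Apply sends an apply patch to object's endpoint in the Kubernetes API and
// updates object with any returned content, taking ownership of conflicting
// fields (force=true).
func Apply(ctx context.Context, writer patcher, object client.Object) error {
	// Generate an apply-patch by comparing the object to its zero value.
	zero := reflect.New(reflect.TypeOf(object).Elem()).Interface()
	data, err := client.MergeFrom(zero.(client.Object)).Data(object)
	apply := client.RawPatch(client.Apply.Type(), data)

	// Send the apply-patch with force=true.
	if err == nil {
		err = writer.Patch(ctx, object, apply, client.ForceOwnership)
	}
	return err
}
```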
if err == nil && !upgradeJobComplete { - err = errors.WithStack(r.apply(ctx, + err = errors.WithStack(runtime.Apply(ctx, r.Writer, r.generateUpgradeJob(ctx, upgrade, world.ClusterPrimary, config.FetchKeyCommand(&world.Cluster.Spec)))) } @@ -464,7 +448,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( if err == nil && upgradeJobComplete && !removeDataJobsComplete { for _, sts := range world.ClusterReplicas { if err == nil { - err = r.apply(ctx, r.generateRemoveDataJob(ctx, upgrade, sts)) + err = runtime.Apply(ctx, r.Writer, r.generateRemoveDataJob(ctx, upgrade, sts)) } } } diff --git a/internal/controller/postgrescluster/apply.go b/internal/controller/postgrescluster/apply.go index ce3d2fb9e5..22aa1d3ce6 100644 --- a/internal/controller/postgrescluster/apply.go +++ b/internal/controller/postgrescluster/apply.go @@ -6,57 +6,17 @@ package postgrescluster import ( "context" - "reflect" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" ) // apply sends an apply patch to object's endpoint in the Kubernetes API and -// updates object with any returned content. The fieldManager is set to -// r.Owner and the force parameter is true. +// updates object with any returned content. The fieldManager is set by +// r.Writer and the force parameter is true. // - https://docs.k8s.io/reference/using-api/server-side-apply/#managers // - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts func (r *Reconciler) apply(ctx context.Context, object client.Object) error { - // Generate an apply-patch by comparing the object to its zero value. - zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() - data, err := client.MergeFrom(zero.(client.Object)).Data(object) - apply := client.RawPatch(client.Apply.Type(), data) - - // Keep a copy of the object before any API calls. - intent := object.DeepCopyObject() - patch := kubeapi.NewJSONPatch() - - // Send the apply-patch with force=true. - if err == nil { - err = r.patch(ctx, object, apply, client.ForceOwnership) - } - - // Some fields cannot be server-side applied correctly. When their outcome - // does not match the intent, send a json-patch to get really specific. - switch actual := object.(type) { - case *corev1.Service: - applyServiceSpec(patch, actual.Spec, intent.(*corev1.Service).Spec, "spec") - } - - // Send the json-patch when necessary. - if err == nil && !patch.IsEmpty() { - err = r.patch(ctx, object, patch) - } - return err -} - -// applyServiceSpec is called by Reconciler.apply to work around issues -// with server-side apply. -func applyServiceSpec( - patch *kubeapi.JSON6902, actual, intent corev1.ServiceSpec, path ...string, -) { - // Service.Spec.Selector is not +mapType=atomic until Kubernetes 1.22. 
- // - https://issue.k8s.io/97970 - if !equality.Semantic.DeepEqual(actual.Selector, intent.Selector) { - patch.Replace(append(path, "selector")...)(intent.Selector) - } + return runtime.Apply(ctx, r.Writer, object) } diff --git a/internal/controller/postgrescluster/autogrow.go b/internal/controller/postgrescluster/autogrow.go index 6abe380c06..e96d69f19c 100644 --- a/internal/controller/postgrescluster/autogrow.go +++ b/internal/controller/postgrescluster/autogrow.go @@ -63,7 +63,11 @@ func (r *Reconciler) storeDesiredRequest( } if limitSet && current.Value() > previous.Value() { - r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeAutoGrow", + eventType := corev1.EventTypeNormal + if volumeType == "pgWAL" { + eventType = corev1.EventTypeWarning + } + r.Recorder.Eventf(cluster, eventType, "VolumeAutoGrow", "%s volume expansion to %v requested for %s/%s.", volumeType, current.String(), cluster.Name, host) } @@ -152,12 +156,13 @@ func (r *Reconciler) setVolumeSize(ctx context.Context, cluster *v1beta1.Postgre // Otherwise, if the feature gate is not enabled, do not autogrow. } else if feature.Enabled(ctx, feature.AutoGrowVolumes) { - // determine the appropriate volume request based on what's set in the status - if dpv, err := getDesiredVolumeSize( + // Determine the appropriate volume request based on what's set in the status. + // Note: request size set by reference. + if badDesiredVolumeRequest, err := getDesiredVolumeSize( cluster, volumeType, host, volumeRequestSize, ); err != nil { log.Error(err, "For "+cluster.Name+"/"+host+ - ": Unable to parse "+volumeType+" volume request: "+dpv) + ": Unable to parse "+volumeType+" volume request: "+badDesiredVolumeRequest) } // If the volume request size is greater than or equal to the limit and the @@ -165,8 +170,11 @@ func (r *Reconciler) setVolumeSize(ctx context.Context, cluster *v1beta1.Postgre // If the user manually requests a lower limit that is smaller than the current // or requested volume size, it will be ignored in favor of the limit value. 
if volumeRequestSize.Value() >= volumeLimitFromSpec.Value() { - - r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeLimitReached", + eventType := corev1.EventTypeNormal + if volumeType == "pgWAL" { + eventType = corev1.EventTypeWarning + } + r.Recorder.Eventf(cluster, eventType, "VolumeLimitReached", "%s volume(s) for %s/%s are at size limit (%v).", volumeType, cluster.Name, host, volumeLimitFromSpec) @@ -196,15 +204,15 @@ func getDesiredVolumeSize(cluster *v1beta1.PostgresCluster, case volumeType == "pgData": for i := range cluster.Status.InstanceSets { if instanceSpecName == cluster.Status.InstanceSets[i].Name { - for _, dpv := range cluster.Status.InstanceSets[i].DesiredPGDataVolume { - if dpv != "" { - desiredRequest, err := resource.ParseQuantity(dpv) + for _, desiredRequestString := range cluster.Status.InstanceSets[i].DesiredPGDataVolume { + if desiredRequestString != "" { + desiredRequest, err := resource.ParseQuantity(desiredRequestString) if err == nil { if desiredRequest.Value() > volumeRequestSize.Value() { *volumeRequestSize = desiredRequest } } else { - return dpv, err + return desiredRequestString, err } } } @@ -214,15 +222,15 @@ func getDesiredVolumeSize(cluster *v1beta1.PostgresCluster, case volumeType == "pgWAL": for i := range cluster.Status.InstanceSets { if instanceSpecName == cluster.Status.InstanceSets[i].Name { - for _, dpv := range cluster.Status.InstanceSets[i].DesiredPGWALVolume { - if dpv != "" { - desiredRequest, err := resource.ParseQuantity(dpv) + for _, desiredRequestString := range cluster.Status.InstanceSets[i].DesiredPGWALVolume { + if desiredRequestString != "" { + desiredRequest, err := resource.ParseQuantity(desiredRequestString) if err == nil { if desiredRequest.Value() > volumeRequestSize.Value() { *volumeRequestSize = desiredRequest } } else { - return dpv, err + return desiredRequestString, err } } } @@ -238,15 +246,15 @@ func getDesiredVolumeSize(cluster *v1beta1.PostgresCluster, } for i := range cluster.Status.PGBackRest.Repos { if volumeType == cluster.Status.PGBackRest.Repos[i].Name { - dpv := cluster.Status.PGBackRest.Repos[i].DesiredRepoVolume - if dpv != "" { - desiredRequest, err := resource.ParseQuantity(dpv) + desiredRequestString := cluster.Status.PGBackRest.Repos[i].DesiredRepoVolume + if desiredRequestString != "" { + desiredRequest, err := resource.ParseQuantity(desiredRequestString) if err == nil { if desiredRequest.Value() > volumeRequestSize.Value() { *volumeRequestSize = desiredRequest } } else { - return dpv, err + return desiredRequestString, err } } } diff --git a/internal/controller/postgrescluster/autogrow_test.go b/internal/controller/postgrescluster/autogrow_test.go index e276e60a19..180bd49084 100644 --- a/internal/controller/postgrescluster/autogrow_test.go +++ b/internal/controller/postgrescluster/autogrow_test.go @@ -101,6 +101,7 @@ func TestStoreDesiredRequest(t *testing.T) { expectedLog string expectedNumEvents int expectedEvent string + expectedEventType string }{{ tcName: "PGData-BadRequestNoBackup", Voltype: "pgData", host: "red", @@ -122,13 +123,13 @@ func TestStoreDesiredRequest(t *testing.T) { tcName: "PGData-BadBackupRequest", Voltype: "pgData", host: "red", desiredRequest: "2Gi", desiredRequestBackup: "bar", expectedValue: "2Gi", - expectedNumEvents: 1, expectedEvent: "pgData volume expansion to 2Gi requested for rhino/red.", + expectedNumEvents: 1, expectedEvent: "pgData volume expansion to 2Gi requested for rhino/red.", expectedEventType: corev1.EventTypeNormal, expectedNumLogs: 1, expectedLog: 
"Unable to parse pgData volume request from status backup (bar) for rhino/red", }, { tcName: "PGData-ValueUpdateWithEvent", Voltype: "pgData", host: "red", desiredRequest: "1Gi", desiredRequestBackup: "", expectedValue: "1Gi", - expectedNumEvents: 1, expectedEvent: "pgData volume expansion to 1Gi requested for rhino/red.", + expectedNumEvents: 1, expectedEvent: "pgData volume expansion to 1Gi requested for rhino/red.", expectedEventType: corev1.EventTypeNormal, expectedNumLogs: 0, }, { tcName: "PGWAL-BadRequestNoBackup", @@ -156,13 +157,13 @@ func TestStoreDesiredRequest(t *testing.T) { tcName: "PGWAL-BadBackupRequest", Voltype: "pgWAL", host: "red", desiredRequest: "2Gi", desiredRequestBackup: "bar", expectedValue: "2Gi", - expectedNumEvents: 1, expectedEvent: "pgWAL volume expansion to 2Gi requested for rhino/red.", + expectedNumEvents: 1, expectedEvent: "pgWAL volume expansion to 2Gi requested for rhino/red.", expectedEventType: corev1.EventTypeWarning, expectedNumLogs: 1, expectedLog: "Unable to parse pgWAL volume request from status backup (bar) for rhino/red", }, { tcName: "PGWAL-ValueUpdateWithEvent", Voltype: "pgWAL", host: "red", desiredRequest: "1Gi", desiredRequestBackup: "", expectedValue: "1Gi", - expectedNumEvents: 1, expectedEvent: "pgWAL volume expansion to 1Gi requested for rhino/red.", + expectedNumEvents: 1, expectedEvent: "pgWAL volume expansion to 1Gi requested for rhino/red.", expectedEventType: corev1.EventTypeWarning, expectedNumLogs: 0, }, { tcName: "Repo-BadRequestNoBackup", @@ -190,13 +191,13 @@ func TestStoreDesiredRequest(t *testing.T) { tcName: "Repo-BadBackupRequest", Voltype: "repo1", host: "repo-host", desiredRequest: "2Gi", desiredRequestBackup: "bar", expectedValue: "2Gi", - expectedNumEvents: 1, expectedEvent: "repo1 volume expansion to 2Gi requested for rhino/repo-host.", + expectedNumEvents: 1, expectedEvent: "repo1 volume expansion to 2Gi requested for rhino/repo-host.", expectedEventType: corev1.EventTypeNormal, expectedNumLogs: 1, expectedLog: "Unable to parse repo1 volume request from status backup (bar) for rhino/repo-host", }, { tcName: "Repo-ValueUpdateWithEvent", Voltype: "repo1", host: "repo-host", desiredRequest: "1Gi", desiredRequestBackup: "", expectedValue: "1Gi", - expectedNumEvents: 1, expectedEvent: "repo1 volume expansion to 1Gi requested for rhino/repo-host.", + expectedNumEvents: 1, expectedEvent: "repo1 volume expansion to 1Gi requested for rhino/repo-host.", expectedEventType: corev1.EventTypeNormal, expectedNumLogs: 0, }} @@ -220,6 +221,7 @@ func TestStoreDesiredRequest(t *testing.T) { assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeAutoGrow") assert.Equal(t, recorder.Events[0].Note, tc.expectedEvent) + assert.Equal(t, recorder.Events[0].Type, tc.expectedEventType) } assert.Equal(t, len(*logs), tc.expectedNumLogs) if tc.expectedNumLogs == 1 { @@ -430,6 +432,7 @@ resources: assert.Equal(t, len(recorder.Events), 1) assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeRequestOverLimit") + assert.Equal(t, recorder.Events[0].Type, corev1.EventTypeWarning) assert.Equal(t, recorder.Events[0].Note, "pgData volume request (4Gi) for elephant/some-instance is greater than set limit (3Gi). 
Limit value will be used.") }) @@ -599,6 +602,7 @@ resources: assert.Equal(t, len(recorder.Events), 1) assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeLimitReached") + assert.Equal(t, recorder.Events[0].Type, corev1.EventTypeNormal) assert.Equal(t, recorder.Events[0].Note, "pgData volume(s) for elephant/some-instance are at size limit (2Gi).") }) @@ -629,11 +633,13 @@ resources: if event.Reason == "VolumeLimitReached" { found1 = true assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Type, corev1.EventTypeNormal) assert.Equal(t, event.Note, "pgData volume(s) for elephant/some-instance are at size limit (5Gi).") } if event.Reason == "DesiredVolumeAboveLimit" { found2 = true assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Type, corev1.EventTypeWarning) assert.Equal(t, event.Note, "The desired size (10Gi) for the elephant/some-instance pgData volume(s) is greater than the size limit (5Gi).") } @@ -675,6 +681,7 @@ resources: assert.Equal(t, len(recorder.Events), 1) assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeLimitReached") + assert.Equal(t, recorder.Events[0].Type, corev1.EventTypeNormal) assert.Equal(t, recorder.Events[0].Note, "repo1 volume(s) for elephant/repo-host are at size limit (2Gi).") }) @@ -707,6 +714,7 @@ resources: assert.Equal(t, len(recorder.Events), 1) assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) assert.Equal(t, recorder.Events[0].Reason, "VolumeLimitReached") + assert.Equal(t, recorder.Events[0].Type, corev1.EventTypeWarning) assert.Equal(t, recorder.Events[0].Note, "pgWAL volume(s) for elephant/another-instance are at size limit (3Gi).") }) diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index b819291ae4..5ea4ace886 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -18,7 +18,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/feature" @@ -82,26 +81,25 @@ func TestCustomLabels(t *testing.T) { require.ParallelCapacity(t, 2) reconciler := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), + Reader: cc, + Recorder: new(record.FakeRecorder), + StatusWriter: client.WithFieldOwner(cc, t.Name()).Status(), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. 
assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( + cc.Patch(ctx, cluster, client.RawPatch( client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) }) // Reconcile the cluster - result, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(cluster), - }) + result, err := reconciler.Reconcile(ctx, cluster) assert.NilError(t, err) assert.Assert(t, result.Requeue == false) } @@ -168,7 +166,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -216,7 +214,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -263,7 +261,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -298,7 +296,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -320,26 +318,25 @@ func TestCustomAnnotations(t *testing.T) { require.ParallelCapacity(t, 2) reconciler := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), + Reader: cc, + Recorder: new(record.FakeRecorder), + StatusWriter: client.WithFieldOwner(cc, t.Name()).Status(), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. 
assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( + cc.Patch(ctx, cluster, client.RawPatch( client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) }) // Reconcile the cluster - result, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(cluster), - }) + result, err := reconciler.Reconcile(ctx, cluster) assert.NilError(t, err) assert.Assert(t, result.Requeue == false) } @@ -407,7 +404,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -455,7 +452,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -502,7 +499,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -537,7 +534,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -554,10 +551,7 @@ func TestCustomAnnotations(t *testing.T) { } func TestGenerateClusterPrimaryService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns2" @@ -658,7 +652,7 @@ func TestReconcileClusterPrimaryService(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{Writer: client.WithFieldOwner(cc, t.Name())} cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -676,10 +670,7 @@ func TestReconcileClusterPrimaryService(t *testing.T) { } func TestGenerateClusterReplicaServiceIntent(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns1" diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index 5cd347a7f3..7d015c4012 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -45,40 +45,39 @@ const controllerName = naming.ControllerPostgresCluster // Reconciler holds resources for the PostgresCluster reconciler type Reconciler struct { - Client client.Client - Owner client.FieldOwner PodExec func( ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error + + Reader interface { + 
Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error + List(context.Context, client.ObjectList, ...client.ListOption) error + } + Writer interface { + Delete(context.Context, client.Object, ...client.DeleteOption) error + DeleteAllOf(context.Context, client.Object, ...client.DeleteAllOfOption) error + Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error + Update(context.Context, client.Object, ...client.UpdateOption) error + } + StatusWriter interface { + Patch(context.Context, client.Object, client.Patch, ...client.SubResourcePatchOption) error + } + Recorder record.EventRecorder Registration registration.Registration } // +kubebuilder:rbac:groups="",resources="events",verbs={create,patch} -// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch} // +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters/status",verbs={patch} -// Reconcile reconciles a ConfigMap in a namespace managed by the PostgreSQL Operator func (r *Reconciler) Reconcile( - ctx context.Context, request reconcile.Request) (reconcile.Result, error, + ctx context.Context, cluster *v1beta1.PostgresCluster) (reconcile.Result, error, ) { ctx, span := tracing.Start(ctx, "reconcile-postgrescluster") log := logging.FromContext(ctx) defer span.End() - // get the postgrescluster from the cache - cluster := &v1beta1.PostgresCluster{} - if err := r.Client.Get(ctx, request.NamespacedName, cluster); err != nil { - // NotFound cannot be fixed by requeuing so ignore it. During background - // deletion, we receive delete events from cluster's dependents after - // cluster is deleted. - if err = client.IgnoreNotFound(err); err != nil { - log.Error(err, "unable to fetch PostgresCluster") - } - return runtime.ErrorWithBackoff(tracing.Escape(span, err)) - } - // Set any defaults that may not have been stored in the API. No DeepCopy // is necessary because controller-runtime makes a copy before returning // from its cache. @@ -175,8 +174,7 @@ func (r *Reconciler) Reconcile( if !equality.Semantic.DeepEqual(before.Status, cluster.Status) { // NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track // managed fields on the status subresource: https://issue.k8s.io/88901 - if err := r.Client.Status().Patch( - ctx, cluster, client.MergeFrom(before), r.Owner); err != nil { + if err := r.StatusWriter.Patch(ctx, cluster, client.MergeFrom(before)); err != nil { log.Error(err, "patching cluster status") return err } @@ -400,24 +398,12 @@ func (r *Reconciler) deleteControlled( version := object.GetResourceVersion() exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} - return r.Client.Delete(ctx, object, exactly) + return r.Writer.Delete(ctx, object, exactly) } return nil } -// patch sends patch to object's endpoint in the Kubernetes API and updates -// object with any returned content. The fieldManager is set to r.Owner, but -// can be overridden in options. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -func (r *Reconciler) patch( - ctx context.Context, object client.Object, - patch client.Patch, options ...client.PatchOption, -) error { - options = append([]client.PatchOption{r.Owner}, options...) - return r.Client.Patch(ctx, object, patch, options...) -} - // The owner reference created by controllerutil.SetControllerReference blocks // deletion. 
The OwnerReferencesPermissionEnforcement plugin requires that the // creator of such a reference have either "delete" permission on the owner or @@ -431,7 +417,7 @@ func (r *Reconciler) patch( func (r *Reconciler) setControllerReference( owner *v1beta1.PostgresCluster, controlled client.Object, ) error { - return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetControllerReference(owner, controlled, runtime.Scheme) } // setOwnerReference sets an OwnerReference on the object without setting the @@ -439,7 +425,7 @@ func (r *Reconciler) setControllerReference( func (r *Reconciler) setOwnerReference( owner *v1beta1.PostgresCluster, controlled client.Object, ) error { - return controllerutil.SetOwnerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetOwnerReference(owner, controlled, runtime.Scheme) } // +kubebuilder:rbac:groups="",resources="configmaps",verbs={get,list,watch} @@ -455,18 +441,24 @@ func (r *Reconciler) setOwnerReference( // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={get,list,watch} // +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={get,list,watch} // +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={get,list,watch} +// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch} -// SetupWithManager adds the PostgresCluster controller to the provided runtime manager -func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { - if r.PodExec == nil { - var err error - r.PodExec, err = runtime.NewPodExecutor(mgr.GetConfig()) - if err != nil { - return err - } +// ManagedReconciler creates a [Reconciler] and adds it to m. +func ManagedReconciler(m manager.Manager, r registration.Registration) error { + exec, err := runtime.NewPodExecutor(m.GetConfig()) + kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerPostgresCluster) + recorder := m.GetEventRecorderFor(naming.ControllerPostgresCluster) + + reconciler := &Reconciler{ + PodExec: exec, + Reader: kubernetes, + Recorder: recorder, + Registration: r, + StatusWriter: kubernetes.Status(), + Writer: kubernetes, } - return builder.ControllerManagedBy(mgr). + return errors.Join(err, builder.ControllerManagedBy(m). For(&v1beta1.PostgresCluster{}). Owns(&corev1.ConfigMap{}). Owns(&corev1.Endpoints{}). @@ -481,8 +473,8 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { Owns(&rbacv1.RoleBinding{}). Owns(&batchv1.CronJob{}). Owns(&policyv1.PodDisruptionBudget{}). - Watches(&corev1.Pod{}, r.watchPods()). + Watches(&corev1.Pod{}, reconciler.watchPods()). Watches(&appsv1.StatefulSet{}, - r.controllerRefHandlerFuncs()). // watch all StatefulSets - Complete(r) + reconciler.controllerRefHandlerFuncs()). 
// watch all StatefulSets + Complete(reconcile.AsReconciler(kubernetes, reconciler))) } diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go index 6caa58b85d..e73b1701f1 100644 --- a/internal/controller/postgrescluster/controller_ref_manager.go +++ b/internal/controller/postgrescluster/controller_ref_manager.go @@ -12,12 +12,11 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -28,21 +27,17 @@ import ( func (r *Reconciler) adoptObject(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, obj client.Object) error { - if err := controllerutil.SetControllerReference(postgresCluster, obj, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(postgresCluster, obj); err != nil { return err } - patchBytes, err := kubeapi.NewMergePatch(). + patchBytes, err := runtime.NewMergePatch(). Add("metadata", "ownerReferences")(obj.GetOwnerReferences()).Bytes() if err != nil { return err } - return r.Client.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, - patchBytes), &client.PatchOptions{ - FieldManager: controllerName, - }) + return r.Writer.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, patchBytes)) } // claimObject is responsible for adopting or releasing Objects based on their current @@ -129,7 +124,7 @@ func (r *Reconciler) getPostgresClusterForObject(ctx context.Context, } postgresCluster := &v1beta1.PostgresCluster{} - if err := r.Client.Get(ctx, types.NamespacedName{ + if err := r.Reader.Get(ctx, types.NamespacedName{ Name: clusterName, Namespace: obj.GetNamespace(), }, postgresCluster); err != nil { @@ -165,8 +160,8 @@ func (r *Reconciler) manageControllerRefs(ctx context.Context, func (r *Reconciler) releaseObject(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, obj client.Object) error { - // TODO create a strategic merge type in kubeapi instead of using Merge7386 - patch, err := kubeapi.NewMergePatch(). + // TODO create a strategic merge type instead of using Merge7386 + patch, err := runtime.NewMergePatch(). 
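Not part of the diff: a minimal sketch of the adapter that Complete(reconcile.AsReconciler(kubernetes, reconciler)) relies on, assuming controller-runtime v0.17 or newer. The adapter fetches the object named in each request and ignores NotFound, which is why the hand-written Get/IgnoreNotFound preamble could be deleted from Reconcile above. The ExampleReconciler type, Setup function, and ConfigMap target here are illustrative, not the operator's own.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

type ExampleReconciler struct{}

// Reconcile receives the already-fetched object instead of a reconcile.Request.
func (*ExampleReconciler) Reconcile(ctx context.Context, cm *corev1.ConfigMap) (reconcile.Result, error) {
	// ... work with cm directly; no Get, no IgnoreNotFound ...
	return reconcile.Result{}, nil
}

// Setup wires the object-typed reconciler into a controller the same way
// ManagedReconciler does above: AsReconciler adapts it to reconcile.Reconciler.
func Setup(m manager.Manager) error {
	return builder.ControllerManagedBy(m).
		For(&corev1.ConfigMap{}).
		Complete(reconcile.AsReconciler(m.GetClient(), &ExampleReconciler{}))
}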
Add("metadata", "ownerReferences")([]map[string]string{{ "$patch": "delete", "uid": string(postgresCluster.GetUID()), @@ -175,7 +170,7 @@ func (r *Reconciler) releaseObject(ctx context.Context, return err } - return r.Client.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, patch)) + return r.Writer.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, patch)) } // controllerRefHandlerFuncs returns the handler funcs that should be utilized to watch diff --git a/internal/controller/postgrescluster/controller_ref_manager_test.go b/internal/controller/postgrescluster/controller_ref_manager_test.go index 7a60e4138a..2d84328562 100644 --- a/internal/controller/postgrescluster/controller_ref_manager_test.go +++ b/internal/controller/postgrescluster/controller_ref_manager_test.go @@ -22,7 +22,10 @@ func TestManageControllerRefs(t *testing.T) { require.ParallelCapacity(t, 1) ctx := context.Background() - r := &Reconciler{Client: tClient} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } clusterName := "hippo" cluster := testCluster() @@ -59,7 +62,7 @@ func TestManageControllerRefs(t *testing.T) { obj.Name = "adopt" obj.Labels = map[string]string{naming.LabelCluster: clusterName} - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } @@ -100,7 +103,7 @@ func TestManageControllerRefs(t *testing.T) { BlockOwnerDeletion: &isTrue, }) - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } @@ -123,7 +126,7 @@ func TestManageControllerRefs(t *testing.T) { obj.Name = "ignore-no-labels-refs" obj.Labels = map[string]string{"ignore-label": "ignore-value"} - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } @@ -146,7 +149,7 @@ func TestManageControllerRefs(t *testing.T) { obj.Name = "ignore-no-postgrescluster" obj.Labels = map[string]string{naming.LabelCluster: "nonexistent"} - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index 36759cd784..5b6f3e4c77 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -21,7 +21,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -39,7 +38,7 @@ func TestDeleteControlled(t *testing.T) { require.ParallelCapacity(t, 1) ns := setupNamespace(t, cc) - reconciler := Reconciler{Client: cc} + reconciler := Reconciler{Writer: cc} cluster := testCluster() cluster.Namespace = ns.Name @@ -118,6 +117,7 @@ spec: var _ = Describe("PostgresCluster Reconciler", func() { var test struct { Namespace *corev1.Namespace + Owner string Reconciler Reconciler Recorder *record.FakeRecorder } @@ -129,13 +129,17 @@ var _ = Describe("PostgresCluster Reconciler", func() { test.Namespace.Name = "postgres-operator-test-" + rand.String(6) Expect(suite.Client.Create(ctx, test.Namespace)).To(Succeed()) + test.Owner = "asdf" test.Recorder = record.NewFakeRecorder(100) test.Recorder.IncludeObject = true - test.Reconciler.Client = suite.Client - 
test.Reconciler.Owner = "asdf" + client := client.WithFieldOwner(suite.Client, test.Owner) + + test.Reconciler.Reader = client test.Reconciler.Recorder = test.Recorder test.Reconciler.Registration = nil + test.Reconciler.StatusWriter = client.Status() + test.Reconciler.Writer = client }) AfterEach(func() { @@ -161,9 +165,7 @@ var _ = Describe("PostgresCluster Reconciler", func() { reconcile := func(cluster *v1beta1.PostgresCluster) reconcile.Result { ctx := context.Background() - result, err := test.Reconciler.Reconcile(ctx, - reconcile.Request{NamespacedName: client.ObjectKeyFromObject(cluster)}, - ) + result, err := test.Reconciler.Reconcile(ctx, cluster) Expect(err).ToNot(HaveOccurred(), func() string { var t interface{ StackTrace() errors.StackTrace } if errors.As(err, &t) { @@ -284,7 +286,7 @@ spec: )) Expect(ccm.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -308,7 +310,7 @@ spec: )) Expect(cps.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -337,59 +339,32 @@ spec: // // The "metadata.finalizers" field is also okay. // - https://book.kubebuilder.io/reference/using-finalizers.html - // - // NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track - // managed fields on the status subresource: https://issue.k8s.io/88901 - switch { - case suite.ServerVersion.LessThan(version.MustParseGeneric("1.22")): - - // Kubernetes 1.22 began tracking subresources in managed fields. - // - https://pr.k8s.io/100970 - Expect(existing.ManagedFields).To(ContainElement( - MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), - "FieldsV1": PointTo(MatchAllFields(Fields{ - "Raw": WithTransform(func(in []byte) (out map[string]any) { - Expect(yaml.Unmarshal(in, &out)).To(Succeed()) - return out - }, MatchAllKeys(Keys{ - "f:metadata": MatchAllKeys(Keys{ - "f:finalizers": Not(BeZero()), - }), - "f:status": Not(BeZero()), - })), - })), - }), - ), `controller should manage only "finalizers" and "status"`) - - default: - Expect(existing.ManagedFields).To(ContainElements( - MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), - "FieldsV1": PointTo(MatchAllFields(Fields{ - "Raw": WithTransform(func(in []byte) (out map[string]any) { - Expect(yaml.Unmarshal(in, &out)).To(Succeed()) - return out - }, MatchAllKeys(Keys{ - "f:metadata": MatchAllKeys(Keys{ - "f:finalizers": Not(BeZero()), - }), - })), + Expect(existing.ManagedFields).To(ContainElements( + MatchFields(IgnoreExtras, Fields{ + "Manager": Equal(test.Owner), + "FieldsV1": PointTo(MatchAllFields(Fields{ + "Raw": WithTransform(func(in []byte) (out map[string]any) { + Expect(yaml.Unmarshal(in, &out)).To(Succeed()) + return out + }, MatchAllKeys(Keys{ + "f:metadata": MatchAllKeys(Keys{ + "f:finalizers": Not(BeZero()), + }), })), - }), - MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), - "FieldsV1": PointTo(MatchAllFields(Fields{ - "Raw": WithTransform(func(in []byte) (out map[string]any) { - Expect(yaml.Unmarshal(in, &out)).To(Succeed()) - return out - }, MatchAllKeys(Keys{ - "f:status": Not(BeZero()), - })), + })), + }), + MatchFields(IgnoreExtras, Fields{ + "Manager": Equal(test.Owner), + "FieldsV1": 
PointTo(MatchAllFields(Fields{ + "Raw": WithTransform(func(in []byte) (out map[string]any) { + Expect(yaml.Unmarshal(in, &out)).To(Succeed()) + return out + }, MatchAllKeys(Keys{ + "f:status": Not(BeZero()), })), - }), - ), `controller should manage only "finalizers" and "status"`) - } + })), + }), + ), `controller should manage only "finalizers" and "status"`) }) Specify("Patroni Distributed Configuration", func() { @@ -409,7 +384,7 @@ spec: )) Expect(ds.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -501,7 +476,7 @@ spec: )) Expect(icm.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -522,7 +497,7 @@ spec: )) Expect(instance.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) diff --git a/internal/controller/postgrescluster/delete.go b/internal/controller/postgrescluster/delete.go index a1a4d322dd..74a786dd38 100644 --- a/internal/controller/postgrescluster/delete.go +++ b/internal/controller/postgrescluster/delete.go @@ -58,7 +58,7 @@ func (r *Reconciler) handleDelete( // Make another copy so that Patch doesn't write back to cluster. intent := before.DeepCopy() intent.Finalizers = append(intent.Finalizers, naming.Finalizer) - err := errors.WithStack(r.patch(ctx, intent, + err := errors.WithStack(r.Writer.Patch(ctx, intent, client.MergeFromWithOptions(before, client.MergeFromWithOptimisticLock{}))) // The caller can do what they like or requeue upon error. @@ -96,7 +96,7 @@ func (r *Reconciler) handleDelete( // Make another copy so that Patch doesn't write back to cluster. intent := before.DeepCopy() intent.Finalizers = finalizers.Delete(naming.Finalizer).List() - err := errors.WithStack(r.patch(ctx, intent, + err := errors.WithStack(r.Writer.Patch(ctx, intent, client.MergeFromWithOptions(before, client.MergeFromWithOptimisticLock{}))) // The caller should wait for further events or requeue upon error. 
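The handleDelete hunks above swap the removed r.patch helper for r.Writer.Patch while keeping the optimistic-lock merge patch around the finalizer change. A minimal sketch of that pattern, outside this diff and not the operator's handleDelete; the addFinalizer helper and ConfigMap object are illustrative assumptions.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// addFinalizer patches only the finalizer change, and the generated merge
// patch carries the observed resourceVersion, so it fails with a Conflict
// rather than clobbering an object someone else updated in the meantime.
func addFinalizer(ctx context.Context, w client.Writer, cm *corev1.ConfigMap, finalizer string) error {
	before := cm.DeepCopy()
	cm.Finalizers = append(cm.Finalizers, finalizer)
	return w.Patch(ctx, cm,
		client.MergeFromWithOptions(before, client.MergeFromWithOptimisticLock{}))
}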
diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index a700aa1f95..3b0b9b58fe 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -300,14 +300,14 @@ func (r *Reconciler) observeInstances( selector, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, pods, + r.Reader.List(ctx, pods, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) } if err == nil { err = errors.WithStack( - r.Client.List(ctx, runners, + r.Reader.List(ctx, runners, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -418,7 +418,7 @@ func (r *Reconciler) deleteInstances( instances, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, pods, + r.Reader.List(ctx, pods, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: instances}, )) @@ -456,7 +456,7 @@ func (r *Reconciler) deleteInstances( // apps/v1.Deployment, apps/v1.ReplicaSet, and apps/v1.StatefulSet all // have a "spec.replicas" field with the same meaning. patch := client.RawPatch(client.Merge.Type(), []byte(`{"spec":{"replicas":0}}`)) - err := errors.WithStack(r.patch(ctx, instance, patch)) + err := errors.WithStack(r.Writer.Patch(ctx, instance, patch)) // When the pod controller is missing, requeue rather than return an // error. The garbage collector will stop the pod, and it is not our @@ -532,7 +532,7 @@ func (r *Reconciler) deleteInstance( uList.SetGroupVersionKind(gvk) err = errors.WithStack( - r.Client.List(ctx, uList, + r.Reader.List(ctx, uList, client.InNamespace(cluster.GetNamespace()), client.MatchingLabelsSelector{Selector: selector}, )) @@ -650,7 +650,7 @@ func (r *Reconciler) cleanupPodDisruptionBudgets( pdbList := &policyv1.PodDisruptionBudgetList{} if err == nil { - err = r.Client.List(ctx, pdbList, + err = r.Reader.List(ctx, pdbList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{ Selector: selector, }) @@ -847,7 +847,7 @@ func (r *Reconciler) rolloutInstance( // NOTE(cbandy): This could return an apierrors.IsConflict() which should be // retried by another reconcile (not ignored). return errors.WithStack( - r.Client.Delete(ctx, pod, client.Preconditions{ + r.Writer.Delete(ctx, pod, client.Preconditions{ UID: &pod.UID, ResourceVersion: &pod.ResourceVersion, })) @@ -1188,7 +1188,7 @@ func (r *Reconciler) reconcileInstance( // Create new err variable to avoid abandoning the rest of the reconcile loop if there // is an error getting the monitoring user secret err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(monitoringUserSecret), monitoringUserSecret)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(monitoringUserSecret), monitoringUserSecret)) if err == nil { pgPassword = string(monitoringUserSecret.Data["password"]) } @@ -1205,7 +1205,7 @@ func (r *Reconciler) reconcileInstance( // TODO(sidecar): Create these directories sometime other than startup. 
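The deleteInstances hunk above keeps the same {"spec":{"replicas":0}} merge patch but sends it through r.Writer.Patch. A minimal sketch of why that one patch covers every runner type, outside this diff; scaleToZero is an illustrative name and the StatefulSet parameter stands in for any of the three workload kinds.

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// scaleToZero stops a runner with one small JSON merge patch. Deployments,
// ReplicaSets, and StatefulSets in apps/v1 all expose spec.replicas with the
// same meaning, so the caller never needs to know which kind it holds.
func scaleToZero(ctx context.Context, w client.Writer, runner *appsv1.StatefulSet) error {
	patch := client.RawPatch(client.Merge.Type(), []byte(`{"spec":{"replicas":0}}`))
	return w.Patch(ctx, runner, patch)
}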
collector.AddToPod(ctx, cluster.Spec.Instrumentation, cluster.Spec.ImagePullPolicy, instanceConfigMap, &instance.Spec.Template, []corev1.VolumeMount{postgres.DataVolumeMount()}, pgPassword, - []string{naming.PGBackRestPGDataLogPath}, includeLogrotate, true) + []string{util.GetPGBackRestLogPathForInstance(cluster)}, includeLogrotate, true) } // Add postgres-exporter to the instance Pod spec @@ -1433,7 +1433,7 @@ func (r *Reconciler) reconcileInstanceConfigMap( collector.AddLogrotateConfigs(ctx, cluster.Spec.Instrumentation, instanceConfigMap, []collector.LogrotateConfig{{ - LogFiles: []string{naming.PGBackRestPGDataLogPath + "/*.log"}, + LogFiles: []string{util.GetPGBackRestLogPathForInstance(cluster) + "/*.log"}, }}) } } @@ -1459,7 +1459,7 @@ func (r *Reconciler) reconcileInstanceCertificates( ) (*corev1.Secret, error) { existing := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) instanceCerts := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} instanceCerts.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) @@ -1547,7 +1547,7 @@ func (r *Reconciler) reconcileInstanceSetPodDisruptionBudget( scaled, err = intstr.GetScaledValueFromIntOrPercent(minAvailable, int(*spec.Replicas), true) } if err == nil && scaled <= 0 { - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pdb), pdb)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(pdb), pdb)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, pdb)) } diff --git a/internal/controller/postgrescluster/instance_rollout_test.go b/internal/controller/postgrescluster/instance_rollout_test.go index 7bd63ce9d1..2b8f0db5f8 100644 --- a/internal/controller/postgrescluster/instance_rollout_test.go +++ b/internal/controller/postgrescluster/instance_rollout_test.go @@ -57,9 +57,12 @@ func TestReconcilerRolloutInstance(t *testing.T) { } observed := &observedInstances{forCluster: instances} - key := client.ObjectKey{Namespace: "ns1", Name: "one-pod-bruh"} - reconciler := &Reconciler{} - reconciler.Client = fake.NewClientBuilder().WithObjects(instances[0].Pods[0]).Build() + cc := fake.NewClientBuilder().WithObjects(instances[0].Pods[0]).Build() + key := client.ObjectKeyFromObject(instances[0].Pods[0]) + reconciler := &Reconciler{ + Reader: cc, + Writer: cc, + } execCalls := 0 reconciler.PodExec = func( @@ -82,13 +85,13 @@ func TestReconcilerRolloutInstance(t *testing.T) { return nil } - assert.NilError(t, reconciler.Client.Get(ctx, key, &corev1.Pod{}), + assert.NilError(t, cc.Get(ctx, key, &corev1.Pod{}), "bug in test: expected pod to exist") assert.NilError(t, reconciler.rolloutInstance(ctx, cluster, observed, instances[0])) assert.Equal(t, execCalls, 1, "expected PodExec to be called") - err := reconciler.Client.Get(ctx, key, &corev1.Pod{}) + err := cc.Get(ctx, key, &corev1.Pod{}) assert.Assert(t, apierrors.IsNotFound(err), "expected pod to be deleted, got: %#v", err) }) diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index 1d17e4f9f3..f00267974b 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" 
"sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/collector" "github.com/crunchydata/postgres-operator/internal/controller/runtime" @@ -1213,33 +1212,32 @@ func TestDeleteInstance(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), + Reader: cc, + Recorder: new(record.FakeRecorder), + StatusWriter: client.WithFieldOwner(cc, t.Name()).Status(), + Writer: client.WithFieldOwner(cc, t.Name()), } // Define, Create, and Reconcile a cluster to get an instance running in kube cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( + cc.Patch(ctx, cluster, client.RawPatch( client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) }) // Reconcile the entire cluster so that we don't have to create all the // resources needed to reconcile a single instance (cm,secrets,svc, etc.) - result, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject(cluster), - }) + result, err := reconciler.Reconcile(ctx, cluster) assert.NilError(t, err) assert.Assert(t, result.Requeue == false) stsList := &appsv1.StatefulSetList{} - assert.NilError(t, reconciler.Client.List(ctx, stsList, + assert.NilError(t, cc.List(ctx, stsList, client.InNamespace(cluster.Namespace), client.MatchingLabels{ naming.LabelCluster: cluster.Name, @@ -1272,7 +1270,7 @@ func TestDeleteInstance(t *testing.T) { err := wait.PollUntilContextTimeout(ctx, time.Second*3, Scale(time.Second*30), false, func(ctx context.Context) (bool, error) { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -1816,8 +1814,8 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } foundPDB := func( @@ -1825,7 +1823,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { spec *v1beta1.PostgresInstanceSetSpec, ) bool { got := &policyv1.PodDisruptionBudget{} - err := r.Client.Get(ctx, + err := cc.Get(ctx, naming.AsObjectKey(naming.InstanceSet(cluster, spec)), got) return !apierrors.IsNotFound(err) @@ -1857,8 +1855,8 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { spec := &cluster.Spec.InstanceSets[0] spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, foundPDB(cluster, spec)) @@ -1884,8 +1882,8 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { spec := &cluster.Spec.InstanceSets[0] spec.MinAvailable = 
initialize.Pointer(intstr.FromString("50%")) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, foundPDB(cluster, spec)) @@ -1934,8 +1932,8 @@ func TestCleanupDisruptionBudgets(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -1964,14 +1962,14 @@ func TestCleanupDisruptionBudgets(t *testing.T) { createPDB := func( pdb *policyv1.PodDisruptionBudget, ) error { - return r.Client.Create(ctx, pdb) + return cc.Create(ctx, pdb) } foundPDB := func( pdb *policyv1.PodDisruptionBudget, ) bool { return !apierrors.IsNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(pdb), + cc.Get(ctx, client.ObjectKeyFromObject(pdb), &policyv1.PodDisruptionBudget{})) } @@ -1986,8 +1984,8 @@ func TestCleanupDisruptionBudgets(t *testing.T) { spec := &cluster.Spec.InstanceSets[0] spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) expectedPDB := generatePDB(t, cluster, spec, initialize.Pointer(intstr.FromInt32(1))) @@ -2031,8 +2029,7 @@ func TestReconcileInstanceConfigMap(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } t.Run("LocalVolumeOtelDisabled", func(t *testing.T) { diff --git a/internal/controller/postgrescluster/patroni.go b/internal/controller/postgrescluster/patroni.go index af3a3b8cca..7368fe295f 100644 --- a/internal/controller/postgrescluster/patroni.go +++ b/internal/controller/postgrescluster/patroni.go @@ -37,7 +37,7 @@ func (r *Reconciler) deletePatroniArtifacts( selector, err := naming.AsSelector(naming.ClusterPatronis(cluster)) if err == nil { err = errors.WithStack( - r.Client.DeleteAllOf(ctx, &corev1.Endpoints{}, + r.Writer.DeleteAllOf(ctx, &corev1.Endpoints{}, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -324,7 +324,7 @@ func (r *Reconciler) reconcilePatroniStatus( dcs := &corev1.Endpoints{ObjectMeta: naming.PatroniDistributedConfiguration(cluster)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(dcs), dcs))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(dcs), dcs))) if err == nil { if dcs.Annotations["initialize"] != "" { @@ -362,14 +362,14 @@ func (r *Reconciler) reconcileReplicationSecret( Name: cluster.Spec.CustomReplicationClientTLSSecret.Name, Namespace: cluster.Namespace, }} - err := errors.WithStack(r.Client.Get(ctx, + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(custom), custom)) return custom, err } existing := &corev1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(cluster)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) leaf := &pki.LeafCertificate{} commonName := postgres.ReplicationUser diff --git 
a/internal/controller/postgrescluster/patroni_test.go b/internal/controller/postgrescluster/patroni_test.go index b7fe885305..6968bd3259 100644 --- a/internal/controller/postgrescluster/patroni_test.go +++ b/internal/controller/postgrescluster/patroni_test.go @@ -32,11 +32,7 @@ import ( ) func TestGeneratePatroniLeaderLeaseService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - reconciler := &Reconciler{ - Client: cc, Recorder: new(record.FakeRecorder), } @@ -232,7 +228,7 @@ func TestReconcilePatroniLeaderLease(t *testing.T) { require.ParallelCapacity(t, 1) ns := setupNamespace(t, cc) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{Writer: client.WithFieldOwner(cc, t.Name())} cluster := testCluster() cluster.Namespace = ns.Name @@ -322,7 +318,10 @@ func TestPatroniReplicationSecret(t *testing.T) { require.ParallelCapacity(t, 0) ctx := context.Background() - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } // test postgrescluster values var ( @@ -351,7 +350,7 @@ func TestPatroniReplicationSecret(t *testing.T) { patroniReplicationSecret := &corev1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(postgresCluster)} patroniReplicationSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) - err = r.Client.Get(ctx, client.ObjectKeyFromObject(patroniReplicationSecret), patroniReplicationSecret) + err = tClient.Get(ctx, client.ObjectKeyFromObject(patroniReplicationSecret), patroniReplicationSecret) assert.NilError(t, err) t.Run("ca.crt", func(t *testing.T) { @@ -426,7 +425,7 @@ func TestReconcilePatroniStatus(t *testing.T) { require.ParallelCapacity(t, 0) ns := setupNamespace(t, tClient) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Reader: tClient} systemIdentifier := "6952526174828511264" createResources := func(index, readyReplicas int, @@ -526,13 +525,9 @@ func TestReconcilePatroniStatus(t *testing.T) { } func TestReconcilePatroniSwitchover(t *testing.T) { - _, client := setupKubernetes(t) - require.ParallelCapacity(t, 0) - var called, failover, callError, callFails bool var timelineCallNoLeader, timelineCall bool r := Reconciler{ - Client: client, PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { called = true diff --git a/internal/controller/postgrescluster/pgadmin.go b/internal/controller/postgrescluster/pgadmin.go index dbaaf359ee..fe5d4ce21d 100644 --- a/internal/controller/postgrescluster/pgadmin.go +++ b/internal/controller/postgrescluster/pgadmin.go @@ -96,7 +96,7 @@ func (r *Reconciler) reconcilePGAdminConfigMap( // pgAdmin is disabled; delete the ConfigMap if it exists. Check the // client cache first using Get. key := client.ObjectKeyFromObject(configmap) - err := errors.WithStack(r.Client.Get(ctx, key, configmap)) + err := errors.WithStack(r.Reader.Get(ctx, key, configmap)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, configmap)) } @@ -212,7 +212,7 @@ func (r *Reconciler) reconcilePGAdminService( // pgAdmin is disabled; delete the Service if it exists. Check the client // cache first using Get. 
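The pgAdmin hunks above repeat one cleanup shape: when the feature is disabled, Get from the cache first and only Delete what actually exists. A minimal sketch of that shape, not the operator's deleteControlled (which additionally checks the controlling owner and adds UID/resourceVersion preconditions); deleteIfExists and the Service object are illustrative.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteIfExists reads from the cache first so a missing object is a cheap
// no-op, and only issues a Delete when the object is actually present.
func deleteIfExists(ctx context.Context, r client.Reader, w client.Writer, svc *corev1.Service) error {
	err := r.Get(ctx, client.ObjectKeyFromObject(svc), svc)
	if apierrors.IsNotFound(err) {
		return nil // nothing to clean up
	}
	if err != nil {
		return err
	}
	return w.Delete(ctx, svc)
}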
key := client.ObjectKeyFromObject(service) - err := errors.WithStack(r.Client.Get(ctx, key, service)) + err := errors.WithStack(r.Reader.Get(ctx, key, service)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, service)) } @@ -240,7 +240,7 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( // pgAdmin is disabled; delete the Deployment if it exists. Check the // client cache first using Get. key := client.ObjectKeyFromObject(sts) - err := errors.WithStack(r.Client.Get(ctx, key, sts)) + err := errors.WithStack(r.Reader.Get(ctx, key, sts)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, sts)) } @@ -333,7 +333,7 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by // the StatefulSet that gets created in the next reconcile. existing := &appsv1.StatefulSet{} - if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { + if err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { if !apierrors.IsNotFound(err) { return err } @@ -346,7 +346,7 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) - return errors.WithStack(client.IgnoreNotFound(r.Client.Delete(ctx, existing, exactly, propagate))) + return errors.WithStack(client.IgnoreNotFound(r.Writer.Delete(ctx, existing, exactly, propagate))) } } @@ -391,7 +391,7 @@ func (r *Reconciler) reconcilePGAdminDataVolume( // pgAdmin is disabled; delete the PVC if it exists. Check the client // cache first using Get. key := client.ObjectKeyFromObject(pvc) - err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + err := errors.WithStack(r.Reader.Get(ctx, key, pvc)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc)) } @@ -439,7 +439,7 @@ func (r *Reconciler) reconcilePGAdminUsers( pod := &corev1.Pod{ObjectMeta: naming.ClusterPGAdmin(cluster)} pod.Name += "-0" - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pod), pod)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(pod), pod)) if err != nil { return client.IgnoreNotFound(err) } diff --git a/internal/controller/postgrescluster/pgadmin_test.go b/internal/controller/postgrescluster/pgadmin_test.go index fc585d8952..bb81d90cf8 100644 --- a/internal/controller/postgrescluster/pgadmin_test.go +++ b/internal/controller/postgrescluster/pgadmin_test.go @@ -29,10 +29,7 @@ import ( ) func TestGeneratePGAdminConfigMap(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "some-ns" @@ -118,11 +115,7 @@ ownerReferences: } func TestGeneratePGAdminService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - reconciler := &Reconciler{ - Client: cc, Recorder: new(record.FakeRecorder), } @@ -354,7 +347,10 @@ func TestReconcilePGAdminService(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -456,7 +452,10 @@ func TestReconcilePGAdminStatefulSet(t 
*testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } ns := setupNamespace(t, cc) cluster := pgAdminTestCluster(*ns) @@ -670,8 +669,7 @@ func TestReconcilePGAdminDataVolume(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &Reconciler{ - Client: tClient, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(tClient, t.Name()), } ns := setupNamespace(t, tClient) @@ -721,7 +719,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { t.Run("NoPods", func(t *testing.T) { r := new(Reconciler) - r.Client = fake.NewClientBuilder().Build() + r.Reader = fake.NewClientBuilder().Build() assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) }) @@ -737,7 +735,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { pod.Status.ContainerStatuses = nil r := new(Reconciler) - r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) }) @@ -757,7 +755,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { new(corev1.ContainerStateRunning) r := new(Reconciler) - r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) }) @@ -773,7 +771,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { new(corev1.ContainerStateRunning) r := new(Reconciler) - r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() calls := 0 r.PodExec = func( diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 0acb86513f..52065093a2 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -8,8 +8,10 @@ import ( "context" "fmt" "io" + "path/filepath" "reflect" "regexp" + "slices" "sort" "strings" "time" @@ -38,6 +40,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/shell" "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -137,7 +140,7 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by // the StatefulSet that gets created in the next reconcile. 
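applyRepoHostIntent above (like the pgAdmin StatefulSet path) deletes the existing StatefulSet while deliberately leaving its Pods behind. A minimal sketch of that delete call, outside this diff; orphanDelete is an illustrative name.

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// orphanDelete removes only the exact revision that was just read (UID and
// resourceVersion preconditions) and uses the Orphan propagation policy, so
// the Pods stay running and can be adopted by the StatefulSet created on the
// next reconcile.
func orphanDelete(ctx context.Context, w client.Writer, existing *appsv1.StatefulSet) error {
	uid := existing.GetUID()
	version := existing.GetResourceVersion()
	return client.IgnoreNotFound(w.Delete(ctx, existing,
		client.Preconditions{UID: &uid, ResourceVersion: &version},
		client.PropagationPolicy(metav1.DeletePropagationOrphan)))
}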
existing := &appsv1.StatefulSet{} - if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(repo), existing)); err != nil { + if err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(repo), existing)); err != nil { if !apierrors.IsNotFound(err) { return nil, err } @@ -150,7 +153,7 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) - return repo, errors.WithStack(r.Client.Delete(ctx, existing, exactly, propagate)) + return repo, errors.WithStack(r.Writer.Delete(ctx, existing, exactly, propagate)) } } @@ -248,7 +251,7 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - if err := r.Client.List(ctx, uList, + if err := r.Reader.List(ctx, uList, client.InNamespace(postgresCluster.GetNamespace()), client.MatchingLabelsSelector{Selector: selector}); err != nil { return nil, errors.WithStack(err) @@ -398,7 +401,7 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, // If nothing has specified that the resource should not be deleted, then delete if delete { - if err := r.Client.Delete(ctx, &ownedResources[i], + if err := r.Writer.Delete(ctx, &ownedResources[i], client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { return []unstructured.Unstructured{}, errors.WithStack(err) } @@ -821,7 +824,13 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl {Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()}, } } else { - container.Command = []string{"/bin/pgbackrest", "backup"} + mkdirCommand := "" + cloudLogPath := getCloudLogPath(postgresCluster) + if cloudLogPath != "" { + mkdirCommand += shell.MakeDirectories(cloudLogPath, cloudLogPath) + "; " + } + + container.Command = []string{"sh", "-c", "--", mkdirCommand + `exec "$@"`, "--", "/bin/pgbackrest", "backup"} container.Command = append(container.Command, cmdOpts...) } @@ -884,9 +893,29 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl jobSpec.Template.Spec.SecurityContext = postgres.PodSecurityContext(postgresCluster) pgbackrest.AddConfigToCloudBackupJob(postgresCluster, &jobSpec.Template) - // Mount the PVC named in the "pgbackrest-cloud-log-volume" annotation, if any. - if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" { - util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolumeName) + // If the "pgbackrest-cloud-log-volume" annotation has a value, check if it is the + // same as any of the additional volume names. If there is a collision of names, + // create a warning event. If there is no name collision, mount the volume referenced + // by the annotation. + if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" { + var collisionFound bool + if jobs != nil && jobs.Volumes != nil { + for _, volume := range jobs.Volumes.Additional { + if volume.Name == logVolume { + collisionFound = true + r.Recorder.Event(postgresCluster, corev1.EventTypeWarning, + "DuplicateCloudBackupVolume", "The volume name specified in the "+ + "pgbackrest-cloud-log-volume annotation is the same as one "+ + "specified in spec.backups.pgbackrest.jobs.volumes.additional. "+ + "Cannot mount duplicate volume names. 
Defaulting to the "+ + "additional volume.") + break + } + } + } + if !collisionFound { + util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolume) + } } } @@ -919,7 +948,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, // lookup the various patroni endpoints leaderEP, dcsEP, failoverEP := corev1.Endpoints{}, corev1.Endpoints{}, corev1.Endpoints{} currentEndpoints := []corev1.Endpoints{} - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), + if err := r.Reader.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), &leaderEP); err != nil { if !apierrors.IsNotFound(err) { return nil, nil, errors.WithStack(err) @@ -927,7 +956,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, } else { currentEndpoints = append(currentEndpoints, leaderEP) } - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)), + if err := r.Reader.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)), &dcsEP); err != nil { if !apierrors.IsNotFound(err) { return nil, nil, errors.WithStack(err) @@ -935,7 +964,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, } else { currentEndpoints = append(currentEndpoints, dcsEP) } - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)), + if err := r.Reader.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)), &failoverEP); err != nil { if !apierrors.IsNotFound(err) { return nil, nil, errors.WithStack(err) @@ -945,7 +974,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, } restoreJobs := &batchv1.JobList{} - if err := r.Client.List(ctx, restoreJobs, &client.ListOptions{ + if err := r.Reader.List(ctx, restoreJobs, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: naming.PGBackRestRestoreJobSelector(cluster.GetName()), }); err != nil { @@ -993,26 +1022,26 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context, // by the restore job. Clean them up if they still exist. 
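The generateBackupJobSpecIntent change above wraps the cloud backup command in a small shell prologue. A sketch of how that argument list behaves, outside this diff; backupCommand is an illustrative helper, and the mkdir prefix is assumed to come from shell.MakeDirectories as shown above.

package example

// backupCommand mirrors the wrapper above: the script runs any mkdir prefix,
// then `exec "$@"` replaces the shell with the words after the second "--".
// That "--" becomes $0 (and is not part of "$@"), so the Job's main process
// ends up being /bin/pgbackrest backup plus any extra options, not a shell.
func backupCommand(mkdirCommand string, cmdOpts ...string) []string {
	command := []string{"sh", "-c", "--", mkdirCommand + `exec "$@"`, "--", "/bin/pgbackrest", "backup"}
	return append(command, cmdOpts...)
}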
selector := naming.PGBackRestRestoreConfigSelector(cluster.GetName()) restoreConfigMaps := &corev1.ConfigMapList{} - if err := r.Client.List(ctx, restoreConfigMaps, &client.ListOptions{ + if err := r.Reader.List(ctx, restoreConfigMaps, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: selector, }); err != nil { return nil, nil, errors.WithStack(err) } for i := range restoreConfigMaps.Items { - if err := r.Client.Delete(ctx, &restoreConfigMaps.Items[i]); err != nil { + if err := r.Writer.Delete(ctx, &restoreConfigMaps.Items[i]); err != nil { return nil, nil, errors.WithStack(err) } } restoreSecrets := &corev1.SecretList{} - if err := r.Client.List(ctx, restoreSecrets, &client.ListOptions{ + if err := r.Reader.List(ctx, restoreSecrets, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: selector, }); err != nil { return nil, nil, errors.WithStack(err) } for i := range restoreSecrets.Items { - if err := r.Client.Delete(ctx, &restoreSecrets.Items[i]); err != nil { + if err := r.Writer.Delete(ctx, &restoreSecrets.Items[i]); err != nil { return nil, nil, errors.WithStack(err) } } @@ -1104,7 +1133,7 @@ func (r *Reconciler) prepareForRestore(ctx context.Context, // remove any existing restore Jobs if restoreJob != nil { setPreparingClusterCondition("removing restore job") - if err := r.Client.Delete(ctx, restoreJob, + if err := r.Writer.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { return errors.WithStack(err) } @@ -1114,7 +1143,7 @@ func (r *Reconciler) prepareForRestore(ctx context.Context, if clusterRunning { setPreparingClusterCondition("removing runners") for _, runner := range runners { - err := r.Client.Delete(ctx, runner, + err := r.Writer.Delete(ctx, runner, client.PropagationPolicy(metav1.DeletePropagationForeground)) if client.IgnoreNotFound(err) != nil { return errors.WithStack(err) @@ -1145,7 +1174,7 @@ func (r *Reconciler) prepareForRestore(ctx context.Context, setPreparingClusterCondition("removing DCS") // delete any Endpoints for i := range currentEndpoints { - if err := r.Client.Delete(ctx, &currentEndpoints[i]); client.IgnoreNotFound(err) != nil { + if err := r.Writer.Delete(ctx, &currentEndpoints[i]); client.IgnoreNotFound(err) != nil { return errors.WithStack(err) } } @@ -1162,35 +1191,36 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, pgdataVolume, pgwalVolume *corev1.PersistentVolumeClaim, pgtablespaceVolumes []*corev1.PersistentVolumeClaim, dataSource *v1beta1.PostgresClusterDataSource, - instanceName, instanceSetName, configHash, stanzaName string) error { - + instanceName, instanceSetName, configHash, stanzaName string, +) error { + hasFlag := make(map[string]bool) + matchFlag := regexp.MustCompile(`--[^ =]+`) repoName := dataSource.RepoName - options := dataSource.Options + + for _, input := range dataSource.Options { + for _, match := range matchFlag.FindAllString(input, -1) { + hasFlag[match] = true + } + } // ensure options are properly set // TODO (andrewlecuyer): move validation logic to a webhook - for _, opt := range options { + { var msg string switch { - // Since '--repo' can be set with or without an equals ('=') sign, we check for both - // usage patterns. - case strings.Contains(opt, "--repo=") || strings.Contains(opt, "--repo "): + case hasFlag["--repo"]: msg = "Option '--repo' is not allowed: please use the 'repoName' field instead."
- case strings.Contains(opt, "--stanza"): - msg = "Option '--stanza' is not allowed: the operator will automatically set this " + - "option" - case strings.Contains(opt, "--pg1-path"): - msg = "Option '--pg1-path' is not allowed: the operator will automatically set this " + - "option" - case strings.Contains(opt, "--target-action"): - msg = "Option '--target-action' is not allowed: the operator will automatically set this " + - "option " - case strings.Contains(opt, "--link-map"): - msg = "Option '--link-map' is not allowed: the operator will automatically set this " + - "option " + case hasFlag["--stanza"]: + msg = "Option '--stanza' is not allowed: the operator will automatically set this option" + case hasFlag["--pg1-path"]: + msg = "Option '--pg1-path' is not allowed: the operator will automatically set this option" + case hasFlag["--target-action"]: + msg = "Option '--target-action' is not allowed: the operator will automatically set this option" + case hasFlag["--link-map"]: + msg = "Option '--link-map' is not allowed: the operator will automatically set this option" } if msg != "" { - r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "InvalidDataSource", msg, repoName) + r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidDataSource", msg) return nil } } @@ -1198,27 +1228,12 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, pgdata := postgres.DataDirectory(cluster) // combine options provided by user in the spec with those populated by the operator for a // successful restore - opts := append(options, []string{ - "--stanza=" + stanzaName, - "--pg1-path=" + pgdata, - "--repo=" + regexRepoIndex.FindString(repoName)}...) - - // Look specifically for the "--target" flag, NOT flags that contain - // "--target" (e.g. "--target-timeline") - targetRegex, err := regexp.Compile("--target[ =]") - if err != nil { - return err - } - var deltaOptFound, foundTarget bool - for _, opt := range opts { - switch { - case targetRegex.MatchString(opt): - foundTarget = true - case strings.Contains(opt, "--delta"): - deltaOptFound = true - } - } - if !deltaOptFound { + opts := append(slices.Clone(dataSource.Options), shell.QuoteWords( + "--stanza="+stanzaName, + "--pg1-path="+pgdata, + "--repo="+regexRepoIndex.FindString(repoName), + )...) + if !hasFlag["--delta"] { opts = append(opts, "--delta") } @@ -1234,28 +1249,26 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, // - https://github.com/pgbackrest/pgbackrest/blob/bb03b3f41942d0b781931092a76877ad309001ef/src/command/restore/restore.c#L1623 // - https://github.com/pgbackrest/pgbackrest/issues/1314 // - https://github.com/pgbackrest/pgbackrest/issues/987 - if foundTarget { + if hasFlag["--target"] { opts = append(opts, "--target-action=promote") } for i, instanceSpec := range cluster.Spec.InstanceSets { if instanceSpec.Name == instanceSetName { - opts = append(opts, "--link-map=pg_wal="+postgres.WALDirectory(cluster, - &cluster.Spec.InstanceSets[i])) + opts = append(opts, "--link-map=pg_wal="+ + postgres.WALDirectory(cluster, &cluster.Spec.InstanceSets[i])) } } - // Check to see if huge pages have been requested in the spec. If they have, include 'huge_pages = try' - // in the restore command. If they haven't, include 'huge_pages = off'. 
- hugePagesSetting := "off" - if postgres.HugePagesRequested(cluster) { - hugePagesSetting = "try" + params := postgres.NewParameterSet() + postgres.SetHugePages(cluster, params) + if fetchKeyCommand := config.FetchKeyCommand(&cluster.Spec); fetchKeyCommand != "" { + params.Add("encryption_key_command", fetchKeyCommand) } // NOTE (andrewlecuyer): Forcing users to put each argument separately might prevent the need // to do any escaping or use eval. - cmd := pgbackrest.RestoreCommand(pgdata, hugePagesSetting, config.FetchKeyCommand(&cluster.Spec), - pgtablespaceVolumes, strings.Join(opts, " ")) + cmd := pgbackrest.RestoreCommand(cluster.Spec.PostgresVersion, pgdata, params, strings.Join(opts, " ")) // create the volume resources required for the postgres data directory dataVolumeMount := postgres.DataVolumeMount() @@ -1671,7 +1684,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, "PostgreSQL data for the cluster: %w", err) } } else { - if err := r.Client.Get(ctx, + if err := r.Reader.Get(ctx, client.ObjectKey{Name: sourceClusterName, Namespace: sourceClusterNamespace}, sourceCluster); err != nil { if apierrors.IsNotFound(err) { @@ -1873,7 +1886,7 @@ func (r *Reconciler) copyRestoreConfiguration(ctx context.Context, sourceConfig := &corev1.ConfigMap{ObjectMeta: naming.PGBackRestConfig(sourceCluster)} if err == nil { err = errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(sourceConfig), sourceConfig)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(sourceConfig), sourceConfig)) } // Retrieve the pgBackRest Secret of the source cluster if it has one. When @@ -1881,7 +1894,7 @@ func (r *Reconciler) copyRestoreConfiguration(ctx context.Context, sourceSecret := &corev1.Secret{ObjectMeta: naming.PGBackRestSecret(sourceCluster)} if err == nil { err = errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(sourceSecret), sourceSecret)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(sourceSecret), sourceSecret)) if apierrors.IsNotFound(err) { sourceSecret, err = nil, nil @@ -1969,7 +1982,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, // Get the existing Secret for the copy, if it exists. It **must** // exist if not configured as optional. if secretProjection.Optional != nil && *secretProjection.Optional { - if err := errors.WithStack(r.Client.Get(ctx, secretName, + if err := errors.WithStack(r.Reader.Get(ctx, secretName, secretCopy)); apierrors.IsNotFound(err) { continue } else { @@ -1977,7 +1990,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, } } else { if err := errors.WithStack( - r.Client.Get(ctx, secretName, secretCopy)); err != nil { + r.Reader.Get(ctx, secretName, secretCopy)); err != nil { return err } } @@ -2023,7 +2036,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, // Get the existing ConfigMap for the copy, if it exists. It **must** // exist if not configured as optional. 
if configMapProjection.Optional != nil && *configMapProjection.Optional { - if err := errors.WithStack(r.Client.Get(ctx, configMapName, + if err := errors.WithStack(r.Reader.Get(ctx, configMapName, configMapCopy)); apierrors.IsNotFound(err) { continue } else { @@ -2031,7 +2044,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, } } else { if err := errors.WithStack( - r.Client.Get(ctx, configMapName, configMapCopy)); err != nil { + r.Reader.Get(ctx, configMapName, configMapCopy)); err != nil { return err } } @@ -2075,28 +2088,7 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, repoHostName, configHash, serviceName, serviceNamespace string, instanceNames []string) error { - // If the user has specified a PVC to use as a log volume for cloud backups via the - // PGBackRestCloudLogVolume annotation, check for the PVC. If we find it, set the cloud - // log path. If the user has specified a PVC, but we can't find it, create a warning event. - cloudLogPath := "" - if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" { - logVolume := &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: logVolumeName, - Namespace: postgresCluster.GetNamespace(), - }, - } - err := errors.WithStack(r.Client.Get(ctx, - client.ObjectKeyFromObject(logVolume), logVolume)) - if err != nil { - // PVC not retrieved, create warning event - r.Recorder.Event(postgresCluster, corev1.EventTypeWarning, - "PGBackRestCloudLogVolumeNotFound", err.Error()) - } else { - // We successfully found the specified PVC, so we will set the log path - cloudLogPath = "/volumes/" + logVolumeName - } - } + cloudLogPath := getCloudLogPath(postgresCluster) backrestConfig, err := pgbackrest.CreatePGBackRestConfigMapIntent(ctx, postgresCluster, repoHostName, configHash, serviceName, serviceNamespace, cloudLogPath, instanceNames) @@ -2137,7 +2129,7 @@ func (r *Reconciler) reconcilePGBackRestSecret(ctx context.Context, existing := &corev1.Secret{} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(intent), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(intent), existing))) if err == nil { err = r.setControllerReference(cluster, intent) @@ -2413,7 +2405,7 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, // per a new value for the annotation (unless the user manually deletes the Job). 
if completed || failed { if manualAnnotation != "" && backupID != manualAnnotation { - return errors.WithStack(r.Client.Delete(ctx, currentBackupJob, + return errors.WithStack(r.Writer.Delete(ctx, currentBackupJob, client.PropagationPolicy(metav1.DeletePropagationBackground))) } } @@ -2686,7 +2678,7 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, if failed || replicaCreateRepoChanged || (job.GetAnnotations()[naming.PGBackRestCurrentConfig] != containerName) || (job.GetAnnotations()[naming.PGBackRestConfigHash] != configHash) { - if err := r.Client.Delete(ctx, job, + if err := r.Writer.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { return errors.WithStack(err) } @@ -2756,7 +2748,7 @@ func (r *Reconciler) reconcileRepos(ctx context.Context, errors := []error{} errMsg := "reconciling repository volume" - repoVols := []*corev1.PersistentVolumeClaim{} + repoVols := make(map[string]*corev1.PersistentVolumeClaim) var replicaCreateRepo v1beta1.PGBackRestRepo if feature.Enabled(ctx, feature.AutoGrowVolumes) && pgbackrest.RepoHostVolumeDefined(postgresCluster) { @@ -2783,16 +2775,15 @@ func (r *Reconciler) reconcileRepos(ctx context.Context, // value to change later. spec.Resources.Limits = nil - repo, err := r.applyRepoVolumeIntent(ctx, postgresCluster, spec, + repoPVC, err := r.applyRepoVolumeIntent(ctx, postgresCluster, spec, repo.Name, repoResources) if err != nil { log.Error(err, errMsg) errors = append(errors, err) - continue - } - if repo != nil { - repoVols = append(repoVols, repo) } + // Store the repo volume after apply. If nil, that indicates a problem + // and the existing status should be preserved. + repoVols[repo.Name] = repoPVC } postgresCluster.Status.PGBackRest.Repos = @@ -2813,7 +2804,7 @@ func (r *Reconciler) writeRepoVolumeSizeRequestStatus(ctx context.Context, pods := &corev1.PodList{} if err := errors.WithStack( - r.Client.List(ctx, pods, + r.Reader.List(ctx, pods, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{ Selector: naming.PGBackRestDedicatedLabels(cluster.Name).AsSelector()}, @@ -2990,7 +2981,7 @@ func getRepoHostStatus(repoHost *appsv1.StatefulSet) *v1beta1.RepoHostStatus { // existing/current status for any repos in the cluster, the repository volumes // (i.e. PVCs) reconciled for the cluster, and the hashes calculated for the configuration for any // external repositories defined for the cluster. -func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes []*corev1.PersistentVolumeClaim, +func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes map[string]*corev1.PersistentVolumeClaim, configHashes map[string]string, replicaCreateRepoName string) []v1beta1.RepoStatus { // the new repository status that will be generated and returned @@ -2998,11 +2989,18 @@ func getRepoVolumeStatus(repoStatus []v1beta1.RepoStatus, repoVolumes []*corev1. // Update the repo status based on the repo volumes (PVCs) that were reconciled. This includes // updating the status for any existing repository volumes, and adding status for any new - // repository volumes. - for _, rv := range repoVolumes { + // repository volumes. If there was a problem with the volume when an apply was attempted, + // the existing status is preserved. + for repoName, rv := range repoVolumes { newRepoVolStatus := true - repoName := rv.Labels[naming.LabelPGBackRestRepo] for _, rs := range repoStatus { + // Preserve the previous status if it exists and the apply failed. 
+ if rs.Name == repoName && rv == nil { + updatedRepoStatus = append(updatedRepoStatus, rs) + newRepoVolStatus = false + break + } + // treat as new status if contains properties of a cloud (s3, gcr or azure) repo if rs.Name == repoName && rs.RepoOptionsHash == "" { newRepoVolStatus = false @@ -3326,7 +3324,7 @@ func (r *Reconciler) ObserveBackupUniverse(ctx context.Context, }, } err = errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) repoHostStatefulSetNotFound = apierrors.IsNotFound(err) // If we have an error that is not related to a missing repo-host StatefulSet, @@ -3351,3 +3349,24 @@ func authorizeBackupRemovalAnnotationPresent(postgresCluster *v1beta1.PostgresCl } return false } + +// getCloudLogPath is responsible for determining the appropriate log path for pgbackrest +// in cloud backup jobs. If the user specified a log path via the spec, use it. Otherwise, +// if the user specified a log volume for cloud backups via the PGBackRestCloudLogVolume +// annotation, we will use that. If neither scenario is true, return an empty string. +// +// This function assumes that the backups/pgbackrest spec is present in postgresCluster. +func getCloudLogPath(postgresCluster *v1beta1.PostgresCluster) string { + cloudLogPath := "" + if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil && + postgresCluster.Spec.Backups.PGBackRest.Jobs.Log != nil && + postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path != "" { + // TODO: This should be caught by CEL validation, but is it worth also checking that + // Log.Path matches "/volumes/" + the name of an existing additional volume here? + + cloudLogPath = filepath.Clean(postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path) + } else if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" { + cloudLogPath = "/volumes/" + logVolume + } + return cloudLogPath +} diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index d87223a2ee..a976ff9ff2 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" @@ -181,9 +182,9 @@ func TestReconcilePGBackRest(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), + Reader: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(mgr.GetClient(), controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -681,7 +682,7 @@ func TestReconcilePGBackRestRBAC(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 0) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Writer: client.WithFieldOwner(tClient, t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" @@ -740,7 +741,7 @@ func TestReconcileRepoHostRBAC(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 0) - r := &Reconciler{Client: tClient, Owner:
client.FieldOwner(t.Name())} + r := &Reconciler{Reader: tClient, Writer: client.WithFieldOwner(tClient, t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" @@ -807,9 +808,7 @@ func TestReconcileStanzaCreate(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -928,7 +927,7 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Writer: client.WithFieldOwner(tClient, t.Name())} clusterName := "hippocluster" clusterUID := "hippouid" @@ -1089,9 +1088,8 @@ func TestReconcileManualBackup(t *testing.T) { r := &Reconciler{} _, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(mgr.GetClient(), controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -1527,7 +1525,10 @@ func TestGetPGBackRestResources(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } clusterName := "hippocluster" clusterUID := "hippouid" @@ -1839,9 +1840,9 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: tClient, + Reader: tClient, Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(tClient, controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -2218,9 +2219,9 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: tClient, + Reader: tClient, Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(tClient, controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -2355,7 +2356,8 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { LabelSelector: naming.PGBackRestRestoreJobSelector(clusterName), Namespace: cluster.Namespace, })) - assert.Assert(t, tc.result.jobCount == len(restoreJobs.Items)) + assert.Equal(t, tc.result.jobCount, len(restoreJobs.Items), + "got:\n%s", require.Value(yaml.Marshal(restoreJobs.Items))) if len(restoreJobs.Items) == 1 { assert.Assert(t, restoreJobs.Items[0].Labels[naming.LabelStartupInstance] != "") assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestConfigHash] != "") @@ -2394,7 +2396,10 @@ func TestCopyConfigurationResources(t *testing.T) { ctx := context.Background() require.ParallelCapacity(t, 2) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } ns1 := setupNamespace(t, tClient) ns2 := setupNamespace(t, tClient) @@ -2643,8 +2648,7 @@ func TestGenerateBackupJobIntent(t *testing.T) { ns := setupNamespace(t, cc) r := &Reconciler{ - Client: cc, - Owner: controllerName, + Reader: cc, } ctx := context.Background() @@ -2663,6 +2667,11 @@ func 
TestGenerateBackupJobIntent(t *testing.T) { assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: + - sh + - -c + - -- + - exec "$@" + - -- - /bin/pgbackrest - backup - --stanza=db @@ -2965,6 +2974,12 @@ volumes: assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: + - sh + - -c + - -- + - mkdir -p '/volumes/another-pvc' && { chmod 0775 '/volumes/another-pvc' || :; }; + exec "$@" + - -- - /bin/pgbackrest - backup - --stanza=db @@ -3031,7 +3046,11 @@ volumes: cluster := cluster.DeepCopy() cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/stuff/log", + }, Volumes: &v1beta1.PGBackRestVolumesSpec{ Additional: []v1beta1.AdditionalVolume{ { @@ -3048,15 +3067,66 @@ volumes: nil, nil, ) - for _, container := range spec.Template.Spec.Containers { - assert.Assert(t, cmp.MarshalContains(container.VolumeMounts, - ` -- mountPath: /volumes/stuff - name: volumes-stuff`)) - } - - assert.Assert(t, cmp.MarshalContains(spec.Template.Spec.Volumes, - ` + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - sh + - -c + - -- + - mkdir -p '/volumes/stuff/log' && { chmod 0775 '/volumes/stuff/log' || :; }; exec + "$@" + - -- + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/stuff + name: volumes-stuff +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp - name: volumes-stuff persistentVolumeClaim: claimName: additional-pvc`)) @@ -3064,14 +3134,317 @@ volumes: // No events created assert.Equal(t, len(recorder.Events), 0) }) + + t.Run("AdditionalVolumesMissingContainers", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + r.Recorder = recorder + + cluster := cluster.DeepCopy() + cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/stuff/log", + }, + Volumes: &v1beta1.PGBackRestVolumesSpec{ + Additional: []v1beta1.AdditionalVolume{ + { + ClaimName: "additional-pvc", + Containers: []v1beta1.DNS1123Label{ + "pgbackrest", + "non-existent-container", + }, + Name: "stuff", + }, + }, + }, + } + + spec := r.generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - sh + - -c + - -- + - mkdir -p '/volumes/stuff/log' && { chmod 0775 '/volumes/stuff/log' || :; }; exec + "$@" + - -- 
+ - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/stuff + name: volumes-stuff +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp +- name: volumes-stuff + persistentVolumeClaim: + claimName: additional-pvc`)) + + // Missing containers warning event created + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "SpecifiedContainerNotFound") + assert.Equal(t, recorder.Events[0].Note, "The following Backup Job Pod "+ + "containers were specified for additional volumes but cannot be "+ + "found: [non-existent-container].") + }) + + t.Run("AnnotationAndAdditionalVolumeWithPath", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + r.Recorder = recorder + + cluster := cluster.DeepCopy() + cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} + cluster.Annotations[naming.PGBackRestCloudLogVolume] = "stuff" + + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/stuff/log", + }, + Volumes: &v1beta1.PGBackRestVolumesSpec{ + Additional: []v1beta1.AdditionalVolume{ + { + ClaimName: "additional-pvc", + Name: "stuff", + }, + }, + }, + } + + spec := r.generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - sh + - -c + - -- + - mkdir -p '/volumes/stuff/log' && { chmod 0775 '/volumes/stuff/log' || :; }; exec + "$@" + - -- + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/stuff + name: volumes-stuff +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: 
hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp +- name: volumes-stuff + persistentVolumeClaim: + claimName: additional-pvc`)) + + // Annotation/additional volume collision warning event created + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "DuplicateCloudBackupVolume") + assert.Equal(t, recorder.Events[0].Note, "The volume name specified in "+ + "the pgbackrest-cloud-log-volume annotation is the same as one "+ + "specified in spec.backups.pgbackrest.jobs.volumes.additional. Cannot "+ + "mount duplicate volume names. Defaulting to the additional volume.") + }) + + t.Run("AnnotationAndAdditionalVolumeNoPath", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + r.Recorder = recorder + + cluster := cluster.DeepCopy() + cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} + cluster.Annotations[naming.PGBackRestCloudLogVolume] = "stuff" + + cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Volumes: &v1beta1.PGBackRestVolumesSpec{ + Additional: []v1beta1.AdditionalVolume{ + { + ClaimName: "additional-pvc", + Name: "stuff", + }, + }, + }, + } + + spec := r.generateBackupJobSpecIntent(ctx, + cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - sh + - -c + - -- + - mkdir -p '/volumes/stuff' && { chmod 0775 '/volumes/stuff' || :; }; exec "$@" + - -- + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/stuff + name: volumes-stuff +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp +- name: volumes-stuff + persistentVolumeClaim: + claimName: additional-pvc`)) + + // Annotation/additional volume collision warning event created + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "DuplicateCloudBackupVolume") + assert.Equal(t, recorder.Events[0].Note, "The volume name specified in "+ + "the pgbackrest-cloud-log-volume annotation is the same as one "+ + "specified in spec.backups.pgbackrest.jobs.volumes.additional. Cannot "+ + "mount duplicate volume names. 
Defaulting to the additional volume.") + }) } func TestGenerateRepoHostIntent(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - ctx := context.Background() - r := Reconciler{Client: cc} + r := Reconciler{} t.Run("empty", func(t *testing.T) { _, err := r.generateRepoHostIntent(ctx, &v1beta1.PostgresCluster{}, "", &RepoResources{}, @@ -3157,12 +3530,7 @@ func TestGenerateRepoHostIntent(t *testing.T) { } func TestGenerateRestoreJobIntent(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - r := Reconciler{ - Client: cc, - } + r := Reconciler{} t.Run("empty", func(t *testing.T) { err := r.generateRestoreJobIntent(&v1beta1.PostgresCluster{}, "", "", @@ -3384,7 +3752,7 @@ func TestObserveRestoreEnv(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Reader: tClient} namespace := setupNamespace(t, tClient).Name generateJob := func(clusterName string, completed, failed *bool) *batchv1.Job { @@ -3484,18 +3852,18 @@ func TestObserveRestoreEnv(t *testing.T) { fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) fakeLeaderEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) + assert.NilError(t, tClient.Create(ctx, fakeLeaderEP)) fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) fakeDCSEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) + assert.NilError(t, tClient.Create(ctx, fakeDCSEP)) fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) fakeFailoverEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) + assert.NilError(t, tClient.Create(ctx, fakeFailoverEP)) job := generateJob(cluster.Name, initialize.Bool(false), initialize.Bool(false)) - assert.NilError(t, r.Client.Create(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job)) }, result: testResult{ foundRestoreJob: true, @@ -3508,15 +3876,15 @@ func TestObserveRestoreEnv(t *testing.T) { fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) fakeLeaderEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) + assert.NilError(t, tClient.Create(ctx, fakeLeaderEP)) fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) fakeDCSEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) + assert.NilError(t, tClient.Create(ctx, fakeDCSEP)) fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) fakeFailoverEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) + assert.NilError(t, tClient.Create(ctx, fakeFailoverEP)) }, result: testResult{ foundRestoreJob: false, @@ -3527,7 +3895,7 @@ func TestObserveRestoreEnv(t *testing.T) { desc: "restore job only exists", createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) { job := generateJob(cluster.Name, initialize.Bool(false), initialize.Bool(false)) - assert.NilError(t, r.Client.Create(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job)) }, result: testResult{ foundRestoreJob: true, @@ -3541,8 +3909,8 @@ func TestObserveRestoreEnv(t *testing.T) { t.Skip("requires mocking of Job conditions") } job := generateJob(cluster.Name, initialize.Bool(true), nil) - assert.NilError(t, 
r.Client.Create(ctx, job.DeepCopy())) - assert.NilError(t, r.Client.Status().Update(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job.DeepCopy())) + assert.NilError(t, tClient.Status().Update(ctx, job)) }, result: testResult{ foundRestoreJob: true, @@ -3561,8 +3929,8 @@ func TestObserveRestoreEnv(t *testing.T) { t.Skip("requires mocking of Job conditions") } job := generateJob(cluster.Name, nil, initialize.Bool(true)) - assert.NilError(t, r.Client.Create(ctx, job.DeepCopy())) - assert.NilError(t, r.Client.Status().Update(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job.DeepCopy())) + assert.NilError(t, tClient.Status().Update(ctx, job)) }, result: testResult{ foundRestoreJob: true, @@ -3612,7 +3980,9 @@ func TestPrepareForRestore(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Writer: client.WithFieldOwner(tClient, t.Name()), + } namespace := setupNamespace(t, tClient).Name generateJob := func(clusterName string) *batchv1.Job { @@ -3666,7 +4036,7 @@ func TestPrepareForRestore(t *testing.T) { createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) (*batchv1.Job, []corev1.Endpoints) { job := generateJob(cluster.Name) - assert.NilError(t, r.Client.Create(ctx, job)) + assert.NilError(t, tClient.Create(ctx, job)) return job, nil }, result: testResult{ @@ -3686,15 +4056,15 @@ func TestPrepareForRestore(t *testing.T) { fakeLeaderEP := corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) fakeLeaderEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, &fakeLeaderEP)) + assert.NilError(t, tClient.Create(ctx, &fakeLeaderEP)) fakeDCSEP := corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) fakeDCSEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, &fakeDCSEP)) + assert.NilError(t, tClient.Create(ctx, &fakeDCSEP)) fakeFailoverEP := corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) fakeFailoverEP.Namespace = namespace - assert.NilError(t, r.Client.Create(ctx, &fakeFailoverEP)) + assert.NilError(t, tClient.Create(ctx, &fakeFailoverEP)) return nil, []corev1.Endpoints{fakeLeaderEP, fakeDCSEP, fakeFailoverEP} }, result: testResult{ @@ -3801,19 +4171,19 @@ func TestPrepareForRestore(t *testing.T) { leaderEP, dcsEP, failoverEP := corev1.Endpoints{}, corev1.Endpoints{}, corev1.Endpoints{} currentEndpoints := []corev1.Endpoints{} - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), + if err := tClient.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)), &leaderEP); err != nil { assert.NilError(t, client.IgnoreNotFound(err)) } else { currentEndpoints = append(currentEndpoints, leaderEP) } - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)), + if err := tClient.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)), &dcsEP); err != nil { assert.NilError(t, client.IgnoreNotFound(err)) } else { currentEndpoints = append(currentEndpoints, dcsEP) } - if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)), + if err := tClient.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)), &failoverEP); err != nil { assert.NilError(t, client.IgnoreNotFound(err)) } else { @@ -3821,7 +4191,7 @@ func TestPrepareForRestore(t *testing.T) { } restoreJobs := &batchv1.JobList{} - assert.NilError(t, 
r.Client.List(ctx, restoreJobs, &client.ListOptions{ + assert.NilError(t, tClient.List(ctx, restoreJobs, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: naming.PGBackRestRestoreJobSelector(cluster.GetName()), })) @@ -3857,9 +4227,9 @@ func TestReconcileScheduledBackups(t *testing.T) { r := &Reconciler{} _, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), + Reader: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(mgr.GetClient(), controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -4120,7 +4490,7 @@ func TestSetScheduledJobStatus(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 0) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Reader: tClient} clusterName := "hippocluster" clusterUID := "hippouid" @@ -4193,9 +4563,9 @@ func TestBackupsEnabled(t *testing.T) { r := &Reconciler{} ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { r = &Reconciler{ - Client: mgr.GetClient(), + Reader: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(controllerName), - Owner: controllerName, + Writer: client.WithFieldOwner(mgr.GetClient(), controllerName), } }) t.Cleanup(func() { teardownManager(cancel, t) }) @@ -4351,8 +4721,7 @@ func TestGetRepoHostVolumeRequests(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &Reconciler{ - Client: tClient, - Owner: client.FieldOwner(t.Name()), + Reader: tClient, Recorder: new(record.FakeRecorder), } @@ -4497,3 +4866,53 @@ func TestGetRepoHostVolumeRequests(t *testing.T) { }) } } + +func TestGetCloudLogPath(t *testing.T) { + t.Run("NoAnnotationNoSpecPath", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{} + assert.Equal(t, getCloudLogPath(postgrescluster), "") + }) + + t.Run("AnnotationSetNoSpecPath", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{} + postgrescluster.Annotations = map[string]string{} + postgrescluster.Annotations[naming.PGBackRestCloudLogVolume] = "another-pvc" + assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/another-pvc") + }) + + t.Run("NoAnnotationSpecPathSet", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Jobs: &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test/log/", + }, + }, + }, + }, + }, + } + assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/test/log") + }) + + t.Run("BothAnnotationAndSpecPathSet", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Jobs: &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test/log/", + }, + }, + }, + }, + }, + } + postgrescluster.Annotations = map[string]string{} + postgrescluster.Annotations[naming.PGBackRestCloudLogVolume] = "another-pvc" + assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/test/log") + }) +} diff --git a/internal/controller/postgrescluster/pgbouncer.go b/internal/controller/postgrescluster/pgbouncer.go index 8b74f20a67..56203189d0 100644 --- a/internal/controller/postgrescluster/pgbouncer.go +++ b/internal/controller/postgrescluster/pgbouncer.go @@ -97,7 +97,7 @@ func (r *Reconciler) reconcilePGBouncerConfigMap( // PgBouncer is disabled; delete the ConfigMap 
if it exists. Check the // client cache first using Get. key := client.ObjectKeyFromObject(configmap) - err := errors.WithStack(r.Client.Get(ctx, key, configmap)) + err := errors.WithStack(r.Reader.Get(ctx, key, configmap)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, configmap)) } @@ -230,7 +230,7 @@ func (r *Reconciler) reconcilePGBouncerSecret( ) (*corev1.Secret, error) { existing := &corev1.Secret{ObjectMeta: naming.ClusterPGBouncer(cluster)} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if client.IgnoreNotFound(err) != nil { return nil, err } @@ -374,7 +374,7 @@ func (r *Reconciler) reconcilePGBouncerService( // PgBouncer is disabled; delete the Service if it exists. Check the client // cache first using Get. key := client.ObjectKeyFromObject(service) - err := errors.WithStack(r.Client.Get(ctx, key, service)) + err := errors.WithStack(r.Reader.Get(ctx, key, service)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, service)) } @@ -565,7 +565,7 @@ func (r *Reconciler) reconcilePGBouncerDeployment( // PgBouncer is disabled; delete the Deployment if it exists. Check the // client cache first using Get. key := client.ObjectKeyFromObject(deploy) - err := errors.WithStack(r.Client.Get(ctx, key, deploy)) + err := errors.WithStack(r.Reader.Get(ctx, key, deploy)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, deploy)) } @@ -590,7 +590,7 @@ func (r *Reconciler) reconcilePGBouncerPodDisruptionBudget( ) error { deleteExistingPDB := func(cluster *v1beta1.PostgresCluster) error { existing := &policyv1.PodDisruptionBudget{ObjectMeta: naming.ClusterPGBouncer(cluster)} - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, existing)) } diff --git a/internal/controller/postgrescluster/pgbouncer_test.go b/internal/controller/postgrescluster/pgbouncer_test.go index 26f6637ead..78527131ed 100644 --- a/internal/controller/postgrescluster/pgbouncer_test.go +++ b/internal/controller/postgrescluster/pgbouncer_test.go @@ -27,11 +27,7 @@ import ( ) func TestGeneratePGBouncerService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - reconciler := &Reconciler{ - Client: cc, Recorder: new(record.FakeRecorder), } @@ -263,7 +259,10 @@ func TestReconcilePGBouncerService(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -365,11 +364,8 @@ func TestReconcilePGBouncerService(t *testing.T) { } func TestGeneratePGBouncerDeployment(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - ctx := context.Background() - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns3" @@ -548,15 +544,15 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { require.ParallelCapacity(t, 0) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } foundPDB := func( cluster 
*v1beta1.PostgresCluster, ) bool { got := &policyv1.PodDisruptionBudget{} - err := r.Client.Get(ctx, + err := cc.Get(ctx, naming.AsObjectKey(naming.ClusterPGBouncer(cluster)), got) return !apierrors.IsNotFound(err) @@ -595,8 +591,8 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) assert.Assert(t, foundPDB(cluster)) @@ -622,8 +618,8 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("50%")) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) assert.Assert(t, foundPDB(cluster)) diff --git a/internal/controller/postgrescluster/pgmonitor.go b/internal/controller/postgrescluster/pgmonitor.go index 9a6043f868..e30bf3f56f 100644 --- a/internal/controller/postgrescluster/pgmonitor.go +++ b/internal/controller/postgrescluster/pgmonitor.go @@ -153,7 +153,7 @@ func (r *Reconciler) reconcileMonitoringSecret( existing := &corev1.Secret{ObjectMeta: naming.MonitoringUserSecret(cluster)} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if client.IgnoreNotFound(err) != nil { return nil, err } @@ -380,7 +380,7 @@ func (r *Reconciler) reconcileExporterWebConfig(ctx context.Context, } existing := &corev1.ConfigMap{ObjectMeta: naming.ExporterWebConfigMap(cluster)} - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if client.IgnoreNotFound(err) != nil { return nil, err } @@ -439,7 +439,7 @@ func (r *Reconciler) reconcileExporterQueriesConfig(ctx context.Context, cluster *v1beta1.PostgresCluster) (*corev1.ConfigMap, error) { existing := &corev1.ConfigMap{ObjectMeta: naming.ExporterQueriesConfigMap(cluster)} - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if client.IgnoreNotFound(err) != nil { return nil, err } diff --git a/internal/controller/postgrescluster/pgmonitor_test.go b/internal/controller/postgrescluster/pgmonitor_test.go index e4ccaf0d9f..e91b176ec0 100644 --- a/internal/controller/postgrescluster/pgmonitor_test.go +++ b/internal/controller/postgrescluster/pgmonitor_test.go @@ -702,7 +702,10 @@ func TestReconcileMonitoringSecret(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 0) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } cluster := testCluster() cluster.Default() @@ -776,7 +779,10 @@ func 
TestReconcileExporterQueriesConfig(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 0) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } cluster := testCluster() cluster.Default() diff --git a/internal/controller/postgrescluster/pki.go b/internal/controller/postgrescluster/pki.go index d52d6a75da..0e686d4f72 100644 --- a/internal/controller/postgrescluster/pki.go +++ b/internal/controller/postgrescluster/pki.go @@ -42,7 +42,7 @@ func (r *Reconciler) reconcileRootCertificate( existing := &corev1.Secret{} existing.Namespace, existing.Name = cluster.Namespace, naming.RootCertSecret err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) root := &pki.RootCertificateAuthority{} @@ -120,7 +120,7 @@ func (r *Reconciler) reconcileClusterCertificate( existing := &corev1.Secret{ObjectMeta: naming.PostgresTLSSecret(cluster)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) leaf := &pki.LeafCertificate{} dnsNames := append(naming.ServiceDNSNames(ctx, primaryService), naming.ServiceDNSNames(ctx, replicaService)...) diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go index ed74b1220b..b61e983258 100644 --- a/internal/controller/postgrescluster/pki_test.go +++ b/internal/controller/postgrescluster/pki_test.go @@ -42,8 +42,8 @@ func TestReconcileCerts(t *testing.T) { namespace := setupNamespace(t, tClient).Name r := &Reconciler{ - Client: tClient, - Owner: controllerName, + Reader: tClient, + Writer: client.WithFieldOwner(tClient, controllerName), } // set up cluster1 diff --git a/internal/controller/postgrescluster/pod_disruption_budget_test.go b/internal/controller/postgrescluster/pod_disruption_budget_test.go index 6463068d4c..e8cbffc19a 100644 --- a/internal/controller/postgrescluster/pod_disruption_budget_test.go +++ b/internal/controller/postgrescluster/pod_disruption_budget_test.go @@ -13,14 +13,11 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "github.com/crunchydata/postgres-operator/internal/initialize" - "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestGeneratePodDisruptionBudget(t *testing.T) { - _, cc := setupKubernetes(t) - r := &Reconciler{Client: cc} - require.ParallelCapacity(t, 0) + r := &Reconciler{} var ( minAvailable *intstr.IntOrString diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index d45e944039..33907043fb 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -134,7 +134,7 @@ func (r *Reconciler) generatePostgresParameters( pgaudit.PostgreSQLParameters(&builtin) pgbackrest.PostgreSQLParameters(cluster, &builtin, backupsSpecFound) pgmonitor.PostgreSQLParameters(ctx, cluster, &builtin) - postgres.SetHugePages(cluster, &builtin) + postgres.SetHugePages(cluster, builtin.Default) // Last write wins, so start with the recommended defaults. 
result := cmp.Or(builtin.Default.DeepCopy(), postgres.NewParameterSet()) @@ -542,7 +542,7 @@ func (r *Reconciler) reconcilePostgresUserSecrets( selector, err := naming.AsSelector(naming.ClusterPostgresUsers(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, secrets, + r.Reader.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -898,7 +898,7 @@ func (r *Reconciler) reconcilePostgresWALVolume( // No WAL volume is specified; delete the PVC safely if it exists. Check // the client cache first using Get. key := client.ObjectKeyFromObject(pvc) - err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + err := errors.WithStack(r.Reader.Get(ctx, key, pvc)) if err != nil { return nil, client.IgnoreNotFound(err) } @@ -1003,7 +1003,7 @@ func (r *Reconciler) reconcileDatabaseInitSQL(ctx context.Context, Namespace: cluster.Namespace, }, } - err := r.Client.Get(ctx, client.ObjectKeyFromObject(cm), cm) + err := r.Reader.Get(ctx, client.ObjectKeyFromObject(cm), cm) if err != nil { return "", err } diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index 48591d8d49..7754f73c4f 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -283,10 +283,7 @@ func TestGeneratePostgresParameters(t *testing.T) { } func TestGeneratePostgresUserSecret(t *testing.T) { - _, tClient := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: tClient} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns1" @@ -484,8 +481,8 @@ func TestReconcilePostgresVolumes(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &Reconciler{ - Client: tClient, - Owner: client.FieldOwner(t.Name()), + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), } t.Run("DataVolumeNoSourceCluster", func(t *testing.T) { @@ -588,7 +585,7 @@ volumeMode: Filesystem assert.NilError(t, err) // Get snapshot and update Status.ReadyToUse and CreationTime - err = reconciler.Client.Get(ctx, client.ObjectKeyFromObject(snapshot), snapshot) + err = tClient.Get(ctx, client.ObjectKeyFromObject(snapshot), snapshot) assert.NilError(t, err) currentTime := metav1.Now() @@ -596,7 +593,7 @@ volumeMode: Filesystem ReadyToUse: initialize.Bool(true), CreationTime: ¤tTime, } - err = reconciler.Client.Status().Update(ctx, snapshot) + err = tClient.Status().Update(ctx, snapshot) assert.NilError(t, err) // Reconcile volume @@ -861,7 +858,7 @@ func TestReconcileDatabaseInitSQL(t *testing.T) { require.ParallelCapacity(t, 0) r := &Reconciler{ - Client: client, + Reader: client, // Overwrite the PodExec function with a check to ensure the exec // call would have been made @@ -985,7 +982,7 @@ func TestReconcileDatabaseInitSQLConfigMap(t *testing.T) { require.ParallelCapacity(t, 0) r := &Reconciler{ - Client: client, + Reader: client, // Overwrite the PodExec function with a check to ensure the exec // call would have been made diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go index a16bd650fd..74e506f45a 100644 --- a/internal/controller/postgrescluster/snapshots.go +++ b/internal/controller/postgrescluster/snapshots.go @@ -196,7 +196,7 @@ func (r *Reconciler) reconcileDedicatedSnapshotVolume( // Check the client cache first using Get. 
if cluster.Spec.Backups.Snapshots == nil { key := client.ObjectKeyFromObject(pvc) - err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + err := errors.WithStack(r.Reader.Get(ctx, key, pvc)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc)) } @@ -263,13 +263,13 @@ func (r *Reconciler) reconcileDedicatedSnapshotVolume( patch := client.RawPatch(client.Merge.Type(), []byte(annotations)) err = r.handlePersistentVolumeClaimError(cluster, - errors.WithStack(r.patch(ctx, pvc, patch))) + errors.WithStack(r.Writer.Patch(ctx, pvc, patch))) if err != nil { return pvc, err } - err = r.Client.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground)) + err = r.Writer.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground)) return pvc, errors.WithStack(err) } @@ -459,7 +459,7 @@ func (r *Reconciler) getDedicatedSnapshotVolumeRestoreJob(ctx context.Context, selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(postgrescluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, jobs, + r.Reader.List(ctx, jobs, client.InNamespace(postgrescluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -489,7 +489,7 @@ func (r *Reconciler) getLatestCompleteBackupJob(ctx context.Context, selectJobs, err := naming.AsSelector(naming.ClusterBackupJobs(postgrescluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, jobs, + r.Reader.List(ctx, jobs, client.InNamespace(postgrescluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -555,7 +555,7 @@ func (r *Reconciler) getSnapshotsForCluster(ctx context.Context, cluster *v1beta } snapshots := &volumesnapshotv1.VolumeSnapshotList{} err = errors.WithStack( - r.Client.List(ctx, snapshots, + r.Reader.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go index af5d4d1247..83efcad704 100644 --- a/internal/controller/postgrescluster/snapshots_test.go +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -39,9 +39,9 @@ func TestReconcileVolumeSnapshots(t *testing.T) { recorder := events.NewRecorder(t, runtime.Scheme) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, Recorder: recorder, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -60,8 +60,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.UID = "the-uid-123" - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create a snapshot pvc := &corev1.PersistentVolumeClaim{ @@ -72,14 +72,14 @@ func TestReconcileVolumeSnapshots(t *testing.T) { volumeSnapshotClassName := "my-snapshotclass" snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) assert.NilError(t, err) - assert.NilError(t, r.Client.Create(ctx, snapshot)) + assert.NilError(t, cc.Create(ctx, snapshot)) // Get all snapshots for this cluster and assert 1 exists selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, 
snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -91,7 +91,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { // Get all snapshots for this cluster and assert 0 exist snapshots = &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -147,8 +147,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, DeletionPolicy: "Delete", } - assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + assert.NilError(t, cc.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, volumeSnapshotClass)) }) // Create a cluster with snapshots enabled cluster := testCluster() @@ -156,8 +156,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create pvc for reconcile pvc := &corev1.PersistentVolumeClaim{ @@ -174,7 +174,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -193,8 +193,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, DeletionPolicy: "Delete", } - assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + assert.NilError(t, cc.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, volumeSnapshotClass)) }) // Create a cluster with snapshots enabled cluster := testCluster() @@ -203,8 +203,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create pvc with annotation pvcName := initialize.String("dedicated-snapshot-volume") @@ -240,14 +240,14 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, } assert.NilError(t, r.setControllerReference(cluster, snapshot1)) - assert.NilError(t, r.Client.Create(ctx, snapshot1)) + assert.NilError(t, cc.Create(ctx, snapshot1)) // Update snapshot status truePtr := initialize.Bool(true) snapshot1.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - assert.NilError(t, r.Client.Status().Update(ctx, snapshot1)) + assert.NilError(t, cc.Status().Update(ctx, snapshot1)) // Create second snapshot with different annotation value snapshot2 := &volumesnapshotv1.VolumeSnapshot{ @@ -272,13 +272,13 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, } assert.NilError(t, r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, r.Client.Create(ctx, snapshot2)) + assert.NilError(t, cc.Create(ctx, snapshot2)) // Update second snapshot's 
status snapshot2.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - assert.NilError(t, r.Client.Status().Update(ctx, snapshot2)) + assert.NilError(t, cc.Status().Update(ctx, snapshot2)) // Reconcile assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) @@ -288,7 +288,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -308,8 +308,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, DeletionPolicy: "Delete", } - assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + assert.NilError(t, cc.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, volumeSnapshotClass)) }) // Create a cluster with snapshots enabled cluster := testCluster() @@ -318,8 +318,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create pvc with annotation pvcName := initialize.String("dedicated-snapshot-volume") @@ -340,7 +340,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, snapshots, + cc.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) @@ -356,9 +356,9 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { recorder := events.NewRecorder(t, runtime.Scheme) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, Recorder: recorder, + Writer: client.WithFieldOwner(cc, t.Name()), } // Enable snapshots feature gate @@ -374,8 +374,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.UID = "the-uid-123" - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create a dedicated snapshot volume pvc := &corev1.PersistentVolumeClaim{ @@ -396,14 +396,14 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { spec := testVolumeClaimSpec() pvc.Spec = spec.AsPersistentVolumeClaimSpec() assert.NilError(t, r.setControllerReference(cluster, pvc)) - assert.NilError(t, r.Client.Create(ctx, pvc)) + assert.NilError(t, cc.Create(ctx, pvc)) // Assert that the pvc was created selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} assert.NilError(t, - r.Client.List(ctx, pvcs, + cc.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) @@ -419,7 +419,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Assert that the pvc has been deleted or marked for deletion key, fetched := client.ObjectKeyFromObject(pvc), &corev1.PersistentVolumeClaim{} - if err := 
r.Client.Get(ctx, key, fetched); err == nil { + if err := cc.Get(ctx, key, fetched); err == nil { assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted") } else { assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err) @@ -435,8 +435,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create volumes for reconcile clusterVolumes := []*corev1.PersistentVolumeClaim{} @@ -451,7 +451,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} assert.NilError(t, - r.Client.List(ctx, pvcs, + cc.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) @@ -470,18 +470,18 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create successful backup job backupJob := testBackupJob(cluster) assert.NilError(t, r.setControllerReference(cluster, backupJob)) - assert.NilError(t, r.Client.Create(ctx, backupJob)) + assert.NilError(t, cc.Create(ctx, backupJob)) currentTime := metav1.Now() startTime := metav1.NewTime(currentTime.AddDate(0, 0, -1)) backupJob.Status = succeededJobStatus(startTime, currentTime) - assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) + assert.NilError(t, cc.Status().Update(ctx, backupJob)) // Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -498,7 +498,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) assert.NilError(t, - r.Client.List(ctx, restoreJobs, + cc.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -518,8 +518,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create times for jobs currentTime := metav1.Now() @@ -530,10 +530,10 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) assert.NilError(t, r.setControllerReference(cluster, backupJob)) - assert.NilError(t, r.Client.Create(ctx, backupJob)) + assert.NilError(t, cc.Create(ctx, backupJob)) backupJob.Status = succeededJobStatus(earlierStartTime, earlierTime) - assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) + assert.NilError(t, cc.Status().Update(ctx, backupJob)) // Create successful restore job restoreJob := testRestoreJob(cluster) @@ -541,10 +541,10 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { 
naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } assert.NilError(t, r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, r.Client.Create(ctx, restoreJob)) + assert.NilError(t, cc.Create(ctx, restoreJob)) restoreJob.Status = succeededJobStatus(currentStartTime, currentTime) - assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) + assert.NilError(t, cc.Status().Update(ctx, restoreJob)) // Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -561,7 +561,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) assert.NilError(t, - r.Client.List(ctx, restoreJobs, + cc.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -583,8 +583,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) // Create times for jobs currentTime := metav1.Now() @@ -594,10 +594,10 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) assert.NilError(t, r.setControllerReference(cluster, backupJob)) - assert.NilError(t, r.Client.Create(ctx, backupJob)) + assert.NilError(t, cc.Create(ctx, backupJob)) backupJob.Status = succeededJobStatus(startTime, earlierTime) - assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) + assert.NilError(t, cc.Status().Update(ctx, backupJob)) // Create failed restore job restoreJob := testRestoreJob(cluster) @@ -605,13 +605,13 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } assert.NilError(t, r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, r.Client.Create(ctx, restoreJob)) + assert.NilError(t, cc.Create(ctx, restoreJob)) restoreJob.Status = batchv1.JobStatus{ Succeeded: 0, Failed: 1, } - assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) + assert.NilError(t, cc.Status().Update(ctx, restoreJob)) // Setup instances and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -639,8 +639,7 @@ func TestCreateDedicatedSnapshotVolume(t *testing.T) { _, cc := setupKubernetes(t) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -667,8 +666,7 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) { _, cc := setupKubernetes(t) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -695,7 +693,7 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) { selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) assert.NilError(t, - r.Client.List(ctx, jobs, + cc.List(ctx, jobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) @@ -709,8 +707,7 @@ func TestGenerateSnapshotOfDedicatedSnapshotVolume(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: 
client.FieldOwner(t.Name()), + Reader: cc, } ns := setupNamespace(t, cc) @@ -740,8 +737,7 @@ func TestGenerateVolumeSnapshot(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, } ns := setupNamespace(t, cc) @@ -769,8 +765,8 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -787,7 +783,7 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { job1 := testRestoreJob(cluster) job1.Namespace = ns.Name - err := r.Client.Create(ctx, job1) + err := cc.Create(ctx, job1) assert.NilError(t, err) dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) @@ -803,14 +799,14 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { naming.PGBackRestBackupJobCompletion: "backup-timestamp", } - err := r.Client.Create(ctx, job2) + err := cc.Create(ctx, job2) assert.NilError(t, err) job3 := testRestoreJob(cluster) job3.Name = "restore-job-3" job3.Namespace = ns.Name - assert.NilError(t, r.Client.Create(ctx, job3)) + assert.NilError(t, cc.Create(ctx, job3)) dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) assert.NilError(t, err) @@ -824,8 +820,8 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { _, cc := setupKubernetes(t) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -842,7 +838,7 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { job1 := testBackupJob(cluster) job1.Namespace = ns.Name - err := r.Client.Create(ctx, job1) + err := cc.Create(ctx, job1) assert.NilError(t, err) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) @@ -867,13 +863,13 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { job2.Namespace = ns.Name job2.Name = "backup-job-2" - assert.NilError(t, r.Client.Create(ctx, job2)) + assert.NilError(t, cc.Create(ctx, job2)) // Get job1 and update Status. - assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) + assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(job1), job1)) job1.Status = succeededJobStatus(currentStartTime, currentTime) - assert.NilError(t, r.Client.Status().Update(ctx, job1)) + assert.NilError(t, cc.Status().Update(ctx, job1)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -903,16 +899,16 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { assert.NilError(t, r.apply(ctx, job2)) // Get job1 and update Status. - assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) + assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(job1), job1)) job1.Status = succeededJobStatus(currentStartTime, currentTime) - assert.NilError(t, r.Client.Status().Update(ctx, job1)) + assert.NilError(t, cc.Status().Update(ctx, job1)) // Get job2 and update Status. 
- assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2)) + assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(job2), job2)) job2.Status = succeededJobStatus(earlierStartTime, earlierTime) - assert.NilError(t, r.Client.Status().Update(ctx, job2)) + assert.NilError(t, cc.Status().Update(ctx, job2)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -1024,8 +1020,8 @@ func TestGetSnapshotsForCluster(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -1054,7 +1050,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") - assert.NilError(t, r.Client.Create(ctx, snapshot)) + assert.NilError(t, cc.Create(ctx, snapshot)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1095,7 +1091,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") - assert.NilError(t, r.Client.Create(ctx, snapshot2)) + assert.NilError(t, cc.Create(ctx, snapshot2)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1242,25 +1238,24 @@ func TestDeleteSnapshots(t *testing.T) { _, cc := setupKubernetes(t) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name cluster.UID = "the-uid-123" - assert.NilError(t, r.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) rhinoCluster := testCluster() rhinoCluster.Name = "rhino" rhinoCluster.Namespace = ns.Name rhinoCluster.UID = "the-uid-456" - assert.NilError(t, r.Client.Create(ctx, rhinoCluster)) + assert.NilError(t, cc.Create(ctx, rhinoCluster)) t.Cleanup(func() { - assert.Check(t, r.Client.Delete(ctx, cluster)) - assert.Check(t, r.Client.Delete(ctx, rhinoCluster)) + assert.Check(t, cc.Delete(ctx, cluster)) + assert.Check(t, cc.Delete(ctx, rhinoCluster)) }) t.Run("NoSnapshots", func(t *testing.T) { @@ -1287,7 +1282,7 @@ func TestDeleteSnapshots(t *testing.T) { }, } assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1)) - assert.NilError(t, r.Client.Create(ctx, snapshot1)) + assert.NilError(t, cc.Create(ctx, snapshot1)) snapshots := []*volumesnapshotv1.VolumeSnapshot{ snapshot1, @@ -1295,7 +1290,7 @@ func TestDeleteSnapshots(t *testing.T) { assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshots)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, existingSnapshots, + cc.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) assert.Equal(t, len(existingSnapshots.Items), 1) @@ -1337,7 +1332,7 @@ func TestDeleteSnapshots(t *testing.T) { }, } assert.NilError(t, r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, r.Client.Create(ctx, snapshot2)) + assert.NilError(t, cc.Create(ctx, snapshot2)) snapshots := []*volumesnapshotv1.VolumeSnapshot{ snapshot1, snapshot2, @@ -1345,7 +1340,7 @@ func TestDeleteSnapshots(t *testing.T) { assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshots)) existingSnapshots := 
&volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, - r.Client.List(ctx, existingSnapshots, + cc.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) assert.Equal(t, len(existingSnapshots.Items), 1) diff --git a/internal/controller/postgrescluster/suite_test.go b/internal/controller/postgrescluster/suite_test.go index ffb9d6f1eb..f109839028 100644 --- a/internal/controller/postgrescluster/suite_test.go +++ b/internal/controller/postgrescluster/suite_test.go @@ -6,18 +6,12 @@ package postgrescluster import ( "context" - "os" - "strings" "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/util/version" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/testing/require" @@ -25,11 +19,6 @@ import ( var suite struct { Client client.Client - Config *rest.Config - - ServerVersion *version.Version - - Manager manager.Manager } func TestAPIs(t *testing.T) { @@ -39,24 +28,10 @@ func TestAPIs(t *testing.T) { } var _ = BeforeSuite(func() { - if os.Getenv("KUBEBUILDER_ASSETS") == "" && !strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { - Skip("skipping") - } + suite.Client = require.Kubernetes(GinkgoT()) logging.SetLogSink(logging.Logrus(GinkgoWriter, "test", 1, 1)) log.SetLogger(logging.FromContext(context.Background())) - - By("bootstrapping test environment") - suite.Config, suite.Client = require.Kubernetes2(GinkgoT()) - - dc, err := discovery.NewDiscoveryClientForConfig(suite.Config) - Expect(err).ToNot(HaveOccurred()) - - server, err := dc.ServerVersion() - Expect(err).ToNot(HaveOccurred()) - - suite.ServerVersion, err = version.ParseGeneric(server.GitVersion) - Expect(err).ToNot(HaveOccurred()) }) var _ = AfterSuite(func() { diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index a26fa05e78..919633377f 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -7,7 +7,6 @@ package postgrescluster import ( "context" "fmt" - "strconv" "github.com/pkg/errors" batchv1 "k8s.io/api/batch/v1" @@ -18,7 +17,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/initialize" @@ -41,7 +39,7 @@ func (r *Reconciler) observePersistentVolumeClaims( selector, err := naming.AsSelector(naming.Cluster(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, volumes, + r.Reader.List(ctx, volumes, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -392,7 +390,7 @@ func (r *Reconciler) reconcileDirMoveJobs(ctx context.Context, cluster.Spec.DataSource.Volumes != nil { var list batchv1.JobList - if err := r.Client.List(ctx, &list, &client.ListOptions{ + if err := r.Reader.List(ctx, &list, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: naming.DirectoryMoveJobLabels(cluster.Name).AsSelector(), }); err != nil { @@ -477,21 +475,20 @@ func (r *Reconciler) 
reconcileMovePGDataDir(ctx context.Context, // `patroni.dynamic.json` holds the previous state of the DCS. Since we are // migrating the volumes, we want to clear out any obsolete configuration info. script := fmt.Sprintf(`echo "Preparing cluster %s volumes for PGO v5.x" - echo "pgdata_pvc=%s" - echo "Current PG data directory volume contents:" - ls -lh "/pgdata" - echo "Now updating PG data directory..." - [ -d "/pgdata/%s" ] && mv "/pgdata/%s" "/pgdata/pg%s_bootstrap" - rm -f "/pgdata/pg%s/patroni.dynamic.json" - echo "Updated PG data directory contents:" - ls -lh "/pgdata" - echo "PG Data directory preparation complete" - `, cluster.Name, +echo "pgdata_pvc=%s" +echo "Current PG data directory volume contents:" +ls -lh "/pgdata" +echo "Now updating PG data directory..." +[ -d "/pgdata/%s" ] && mv "/pgdata/%s" "/pgdata/pg%d_bootstrap" +rm -f "/pgdata/pg%d/patroni.dynamic.json" +echo "Updated PG data directory contents:" +ls -lh "/pgdata" +echo "PG Data directory preparation complete"`, cluster.Name, cluster.Spec.DataSource.Volumes.PGDataVolume.PVCName, cluster.Spec.DataSource.Volumes.PGDataVolume.Directory, cluster.Spec.DataSource.Volumes.PGDataVolume.Directory, - strconv.Itoa(cluster.Spec.PostgresVersion), - strconv.Itoa(cluster.Spec.PostgresVersion)) + cluster.Spec.PostgresVersion, + cluster.Spec.PostgresVersion) container := corev1.Container{ Command: []string{"bash", "-ceu", script}, @@ -547,8 +544,7 @@ func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, // set gvk and ownership refs moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) - if err := controllerutil.SetControllerReference(cluster, moveDirJob, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(cluster, moveDirJob); err != nil { return true, errors.WithStack(err) } @@ -598,15 +594,14 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, moveDirJob.Labels = labels script := fmt.Sprintf(`echo "Preparing cluster %s volumes for PGO v5.x" - echo "pg_wal_pvc=%s" - echo "Current PG WAL directory volume contents:" - ls -lh "/pgwal" - echo "Now updating PG WAL directory..." - [ -d "/pgwal/%s" ] && mv "/pgwal/%s" "/pgwal/%s-wal" - echo "Updated PG WAL directory contents:" - ls -lh "/pgwal" - echo "PG WAL directory preparation complete" - `, cluster.Name, +echo "pg_wal_pvc=%s" +echo "Current PG WAL directory volume contents:" +ls -lh "/pgwal" +echo "Now updating PG WAL directory..." 
+[ -d "/pgwal/%s" ] && mv "/pgwal/%s" "/pgwal/%s-wal" +echo "Updated PG WAL directory contents:" +ls -lh "/pgwal" +echo "PG WAL directory preparation complete"`, cluster.Name, cluster.Spec.DataSource.Volumes.PGWALVolume.PVCName, cluster.Spec.DataSource.Volumes.PGWALVolume.Directory, cluster.Spec.DataSource.Volumes.PGWALVolume.Directory, @@ -666,8 +661,7 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, // set gvk and ownership refs moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) - if err := controllerutil.SetControllerReference(cluster, moveDirJob, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(cluster, moveDirJob); err != nil { return true, errors.WithStack(err) } @@ -717,18 +711,17 @@ func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context, moveDirJob.Labels = labels script := fmt.Sprintf(`echo "Preparing cluster %s pgBackRest repo volume for PGO v5.x" - echo "repo_pvc=%s" - echo "pgbackrest directory:" - ls -lh /pgbackrest - echo "Current pgBackRest repo directory volume contents:" - ls -lh "/pgbackrest/%s" - echo "Now updating repo directory..." - [ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/archive" - [ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/backup" - echo "Updated /pgbackrest directory contents:" - ls -lh "/pgbackrest" - echo "Repo directory preparation complete" - `, cluster.Name, +echo "repo_pvc=%s" +echo "pgbackrest directory:" +ls -lh /pgbackrest +echo "Current pgBackRest repo directory volume contents:" +ls -lh "/pgbackrest/%s" +echo "Now updating repo directory..." +[ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/archive" +[ -d "/pgbackrest/%s" ] && mv -t "/pgbackrest/" "/pgbackrest/%s/backup" +echo "Updated /pgbackrest directory contents:" +ls -lh "/pgbackrest" +echo "Repo directory preparation complete"`, cluster.Name, cluster.Spec.DataSource.Volumes.PGBackRestVolume.PVCName, cluster.Spec.DataSource.Volumes.PGBackRestVolume.Directory, cluster.Spec.DataSource.Volumes.PGBackRestVolume.Directory, @@ -788,8 +781,7 @@ func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context, // set gvk and ownership refs moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) - if err := controllerutil.SetControllerReference(cluster, moveDirJob, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(cluster, moveDirJob); err != nil { return true, errors.WithStack(err) } diff --git a/internal/controller/postgrescluster/volumes_test.go b/internal/controller/postgrescluster/volumes_test.go index c579e3f578..4404164610 100644 --- a/internal/controller/postgrescluster/volumes_test.go +++ b/internal/controller/postgrescluster/volumes_test.go @@ -375,7 +375,10 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } ns := setupNamespace(t, tClient) cluster := &v1beta1.PostgresCluster{ @@ -637,7 +640,10 @@ func TestReconcileMoveDirectories(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 1) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } ns := setupNamespace(t, tClient) cluster := &v1beta1.PostgresCluster{ @@ -732,7 +738,7 @@ func TestReconcileMoveDirectories(t 
*testing.T) { assert.Assert(t, returnEarly) moveJobs := &batchv1.JobList{} - err = r.Client.List(ctx, moveJobs, &client.ListOptions{ + err = tClient.List(ctx, moveJobs, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: naming.DirectoryMoveJobLabels(cluster.Name).AsSelector(), }) @@ -748,12 +754,17 @@ containers: - command: - bash - -ceu - - "echo \"Preparing cluster testcluster volumes for PGO v5.x\"\n echo \"pgdata_pvc=testpgdata\"\n - \ echo \"Current PG data directory volume contents:\" \n ls -lh \"/pgdata\"\n - \ echo \"Now updating PG data directory...\"\n [ -d \"/pgdata/testpgdatadir\" - ] && mv \"/pgdata/testpgdatadir\" \"/pgdata/pg13_bootstrap\"\n rm -f \"/pgdata/pg13/patroni.dynamic.json\"\n - \ echo \"Updated PG data directory contents:\" \n ls -lh \"/pgdata\"\n echo - \"PG Data directory preparation complete\"\n " + - |- + echo "Preparing cluster testcluster volumes for PGO v5.x" + echo "pgdata_pvc=testpgdata" + echo "Current PG data directory volume contents:" + ls -lh "/pgdata" + echo "Now updating PG data directory..." + [ -d "/pgdata/testpgdatadir" ] && mv "/pgdata/testpgdatadir" "/pgdata/pg13_bootstrap" + rm -f "/pgdata/pg13/patroni.dynamic.json" + echo "Updated PG data directory contents:" + ls -lh "/pgdata" + echo "PG Data directory preparation complete" image: example.com/crunchy-postgres-ha:test imagePullPolicy: Always name: pgdata-move-job @@ -808,12 +819,16 @@ containers: - command: - bash - -ceu - - "echo \"Preparing cluster testcluster volumes for PGO v5.x\"\n echo \"pg_wal_pvc=testwal\"\n - \ echo \"Current PG WAL directory volume contents:\"\n ls -lh \"/pgwal\"\n - \ echo \"Now updating PG WAL directory...\"\n [ -d \"/pgwal/testwaldir\" - ] && mv \"/pgwal/testwaldir\" \"/pgwal/testcluster-wal\"\n echo \"Updated PG - WAL directory contents:\"\n ls -lh \"/pgwal\"\n echo \"PG WAL directory - preparation complete\"\n " + - |- + echo "Preparing cluster testcluster volumes for PGO v5.x" + echo "pg_wal_pvc=testwal" + echo "Current PG WAL directory volume contents:" + ls -lh "/pgwal" + echo "Now updating PG WAL directory..." + [ -d "/pgwal/testwaldir" ] && mv "/pgwal/testwaldir" "/pgwal/testcluster-wal" + echo "Updated PG WAL directory contents:" + ls -lh "/pgwal" + echo "PG WAL directory preparation complete" image: example.com/crunchy-postgres-ha:test imagePullPolicy: Always name: pgwal-move-job @@ -868,14 +883,19 @@ containers: - command: - bash - -ceu - - "echo \"Preparing cluster testcluster pgBackRest repo volume for PGO v5.x\"\n - \ echo \"repo_pvc=testrepo\"\n echo \"pgbackrest directory:\"\n ls -lh - /pgbackrest\n echo \"Current pgBackRest repo directory volume contents:\" \n - \ ls -lh \"/pgbackrest/testrepodir\"\n echo \"Now updating repo directory...\"\n - \ [ -d \"/pgbackrest/testrepodir\" ] && mv -t \"/pgbackrest/\" \"/pgbackrest/testrepodir/archive\"\n - \ [ -d \"/pgbackrest/testrepodir\" ] && mv -t \"/pgbackrest/\" \"/pgbackrest/testrepodir/backup\"\n - \ echo \"Updated /pgbackrest directory contents:\"\n ls -lh \"/pgbackrest\"\n - \ echo \"Repo directory preparation complete\"\n " + - |- + echo "Preparing cluster testcluster pgBackRest repo volume for PGO v5.x" + echo "repo_pvc=testrepo" + echo "pgbackrest directory:" + ls -lh /pgbackrest + echo "Current pgBackRest repo directory volume contents:" + ls -lh "/pgbackrest/testrepodir" + echo "Now updating repo directory..." 
+ [ -d "/pgbackrest/testrepodir" ] && mv -t "/pgbackrest/" "/pgbackrest/testrepodir/archive" + [ -d "/pgbackrest/testrepodir" ] && mv -t "/pgbackrest/" "/pgbackrest/testrepodir/backup" + echo "Updated /pgbackrest directory contents:" + ls -lh "/pgbackrest" + echo "Repo directory preparation complete" image: example.com/crunchy-pgbackrest:test imagePullPolicy: Always name: repo-move-job diff --git a/internal/controller/runtime/apply.go b/internal/controller/runtime/apply.go new file mode 100644 index 0000000000..18926488e5 --- /dev/null +++ b/internal/controller/runtime/apply.go @@ -0,0 +1,58 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Apply sends an apply patch with force=true using cc and updates object with any returned content. +// The client is responsible for setting fieldManager; see [client.WithFieldOwner]. +// +// - https://docs.k8s.io/reference/using-api/server-side-apply#managers +// - https://docs.k8s.io/reference/using-api/server-side-apply#conflicts +func Apply[ + // NOTE: This interface can go away following https://go.dev/issue/47487. + ClientPatch interface { + Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error + }, + T interface{ client.Object }, +](ctx context.Context, cc ClientPatch, object T) error { + // Generate an apply-patch by comparing the object to its zero value. + data, err := client.MergeFrom(*new(T)).Data(object) + apply := client.RawPatch(client.Apply.Type(), data) + + // Keep a copy of the object before any API calls. + intent := object.DeepCopyObject() + + // Send the apply-patch with force=true. + if err == nil { + err = cc.Patch(ctx, object, apply, client.ForceOwnership) + } + + // Some fields cannot be server-side applied correctly. + // When their outcome does not match the intent, send a json-patch to get really specific. + patch := NewJSONPatch() + + switch actual := any(object).(type) { + case *corev1.Service: + intent := intent.(*corev1.Service) + + // Service.Spec.Selector cannot be unset; perhaps https://issue.k8s.io/117447 + if !equality.Semantic.DeepEqual(actual.Spec.Selector, intent.Spec.Selector) { + patch.Replace("spec", "selector")(intent.Spec.Selector) + } + } + + // Send the json-patch when necessary. 
+ if err == nil && !patch.IsEmpty() { + err = cc.Patch(ctx, object, patch) + } + return err +} diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/runtime/apply_test.go similarity index 60% rename from internal/controller/postgrescluster/apply_test.go rename to internal/controller/runtime/apply_test.go index a1fa6b7f14..c6f182ddeb 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/runtime/apply_test.go @@ -2,10 +2,9 @@ // // SPDX-License-Identifier: Apache-2.0 -package postgrescluster +package runtime_test import ( - "context" "errors" "regexp" "strings" @@ -14,7 +13,6 @@ import ( "github.com/google/go-cmp/cmp" "gotest.tools/v3/assert" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -24,17 +22,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/testing/require" ) func TestServerSideApply(t *testing.T) { - ctx := context.Background() - cfg, cc := setupKubernetes(t) + ctx := t.Context() + config, base := require.Kubernetes2(t) require.ParallelCapacity(t, 0) - ns := setupNamespace(t, cc) + ns := require.Namespace(t, base) - dc, err := discovery.NewDiscoveryClientForConfig(cfg) + dc, err := discovery.NewDiscoveryClientForConfig(config) assert.NilError(t, err) server, err := dc.ServerVersion() @@ -44,7 +43,7 @@ func TestServerSideApply(t *testing.T) { assert.NilError(t, err) t.Run("ObjectMeta", func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(base, t.Name()) constructor := func() *corev1.ConfigMap { var cm corev1.ConfigMap cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) @@ -55,7 +54,7 @@ func TestServerSideApply(t *testing.T) { // Create the object. before := constructor() - assert.NilError(t, cc.Patch(ctx, before, client.Apply, reconciler.Owner)) + assert.NilError(t, cc.Patch(ctx, before, client.Apply)) assert.Assert(t, before.GetResourceVersion() != "") // Allow the Kubernetes API clock to advance. @@ -63,7 +62,7 @@ func TestServerSideApply(t *testing.T) { // client.Apply changes the ResourceVersion inadvertently. after := constructor() - assert.NilError(t, cc.Patch(ctx, after, client.Apply, reconciler.Owner)) + assert.NilError(t, cc.Patch(ctx, after, client.Apply)) assert.Assert(t, after.GetResourceVersion() != "") switch { @@ -78,16 +77,16 @@ func TestServerSideApply(t *testing.T) { assert.Assert(t, after.GetResourceVersion() == before.GetResourceVersion()) } - // Our apply method generates the correct apply-patch. + // Our [runtime.Apply] generates the correct apply-patch. again := constructor() - assert.NilError(t, reconciler.apply(ctx, again)) + assert.NilError(t, runtime.Apply(ctx, cc, again)) assert.Assert(t, again.GetResourceVersion() != "") assert.Assert(t, again.GetResourceVersion() == after.GetResourceVersion(), "expected to correctly no-op") }) t.Run("ControllerReference", func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(base, t.Name()) // Setup two possible controllers. 
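For reference, a minimal sketch of how a caller uses runtime.Apply as defined above: the field manager travels with the client via client.WithFieldOwner instead of a separate Owner option, and the object's GVK is populated before applying, as the tests do. The helper name and field-manager string below are illustrative only.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
)

// applyConfigMap applies cm with server-side apply. The field manager is
// carried by the client (client.WithFieldOwner), so no FieldOwner option is
// passed at the call site; runtime.Apply adds force=true and, when needed,
// a follow-up JSON patch.
func applyConfigMap(ctx context.Context, base client.Client, cm *corev1.ConfigMap) error {
	writer := client.WithFieldOwner(base, "example-field-manager")

	// Server-side apply patches need the object's GVK populated.
	cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))

	return runtime.Apply(ctx, writer, cm)
}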
controller1 := new(corev1.ConfigMap) @@ -115,7 +114,7 @@ func TestServerSideApply(t *testing.T) { assert.NilError(t, controllerutil.SetControllerReference(controller2, applied, cc.Scheme())) - err1 := cc.Patch(ctx, applied, client.Apply, client.ForceOwnership, reconciler.Owner) + err1 := cc.Patch(ctx, applied, client.Apply, client.ForceOwnership) // Patch not accepted; the ownerReferences field is invalid. assert.Assert(t, apierrors.IsInvalid(err1), "got %#v", err1) @@ -127,8 +126,8 @@ func TestServerSideApply(t *testing.T) { assert.Assert(t, len(status.ErrStatus.Details.Causes) != 0) assert.Equal(t, status.ErrStatus.Details.Causes[0].Field, "metadata.ownerReferences") - // Try to change the controller using our apply method. - err2 := reconciler.apply(ctx, applied) + // Try to change the controller using our [runtime.Apply]. + err2 := runtime.Apply(ctx, cc, applied) // Same result; patch not accepted. assert.DeepEqual(t, err1, err2, @@ -142,40 +141,6 @@ func TestServerSideApply(t *testing.T) { ) }) - t.Run("StatefulSetStatus", func(t *testing.T) { - constructor := func(name string) *appsv1.StatefulSet { - var sts appsv1.StatefulSet - sts.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSet")) - sts.Namespace, sts.Name = ns.Name, name - sts.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: map[string]string{"select": name}, - } - sts.Spec.Template.Labels = map[string]string{"select": name} - sts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "test", Image: "test"}} - return &sts - } - - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - upstream := constructor("status-upstream") - - // The structs defined in "k8s.io/api/apps/v1" marshal empty status fields. - switch { - case serverVersion.LessThan(version.MustParseGeneric("1.22")): - assert.ErrorContains(t, - cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership, reconciler.Owner), - "field not declared in schema", - "expected https://issue.k8s.io/109210") - - default: - assert.NilError(t, - cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership, reconciler.Owner)) - } - - // Our apply method generates the correct apply-patch. - again := constructor("status-local") - assert.NilError(t, reconciler.apply(ctx, again)) - }) - t.Run("ServiceSelector", func(t *testing.T) { constructor := func(name string) *corev1.Service { var service corev1.Service @@ -187,60 +152,6 @@ func TestServerSideApply(t *testing.T) { return &service } - t.Run("wrong-keys", func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - - intent := constructor("some-selector") - intent.Spec.Selector = map[string]string{"k1": "v1"} - - // Create the Service. - before := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) - - // Something external mucks it up. - assert.NilError(t, - cc.Patch(ctx, before, - client.RawPatch(client.Merge.Type(), []byte(`{"spec":{"selector":{"bad":"v2"}}}`)), - client.FieldOwner("wrong"))) - - // client.Apply cannot correct it in old versions of Kubernetes. 
- after := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner)) - - switch { - case serverVersion.LessThan(version.MustParseGeneric("1.22")): - - assert.Assert(t, len(after.Spec.Selector) != len(intent.Spec.Selector), - "expected https://issue.k8s.io/97970, got %v", after.Spec.Selector) - - default: - assert.DeepEqual(t, after.Spec.Selector, intent.Spec.Selector) - } - - // Our apply method corrects it. - again := intent.DeepCopy() - assert.NilError(t, reconciler.apply(ctx, again)) - assert.DeepEqual(t, again.Spec.Selector, intent.Spec.Selector) - - var count int - var managed *metav1.ManagedFieldsEntry - for i := range again.ManagedFields { - if again.ManagedFields[i].Manager == t.Name() { - count++ - managed = &again.ManagedFields[i] - } - } - - assert.Equal(t, count, 1, "expected manager once in %v", again.ManagedFields) - assert.Equal(t, managed.Operation, metav1.ManagedFieldsOperationApply) - - assert.Assert(t, managed.FieldsV1 != nil) - assert.Assert(t, strings.Contains(string(managed.FieldsV1.Raw), `"f:selector":{`), - "expected f:selector in %s", managed.FieldsV1.Raw) - }) - for _, tt := range []struct { name string selector map[string]string @@ -249,7 +160,7 @@ func TestServerSideApply(t *testing.T) { {"empty", make(map[string]string)}, } { t.Run(tt.name, func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(base, t.Name()) intent := constructor(tt.name + "-selector") intent.Spec.Selector = tt.selector @@ -257,7 +168,7 @@ func TestServerSideApply(t *testing.T) { // Create the Service. before := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, before, client.Apply, client.ForceOwnership)) // Something external mucks it up. assert.NilError(t, @@ -268,14 +179,17 @@ func TestServerSideApply(t *testing.T) { // client.Apply cannot correct it. after := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, after, client.Apply, client.ForceOwnership)) + // Perhaps one of: + // - https://issue.k8s.io/117447 + // - https://github.com/kubernetes-sigs/structured-merge-diff/issues/259 assert.Assert(t, len(after.Spec.Selector) != len(intent.Spec.Selector), "got %v", after.Spec.Selector) - // Our apply method corrects it. + // Our [runtime.Apply] corrects it. again := intent.DeepCopy() - assert.NilError(t, reconciler.apply(ctx, again)) + assert.NilError(t, runtime.Apply(ctx, cc, again)) assert.Assert(t, equality.Semantic.DeepEqual(again.Spec.Selector, intent.Spec.Selector), "\n--- again.Spec.Selector\n+++ intent.Spec.Selector\n%v", diff --git a/internal/controller/runtime/client.go b/internal/controller/runtime/client.go index c41fe5a9c0..1bdbdddd14 100644 --- a/internal/controller/runtime/client.go +++ b/internal/controller/runtime/client.go @@ -76,6 +76,7 @@ func (fn ClientUpdate) Update(ctx context.Context, obj client.Object, opts ...cl return fn(ctx, obj, opts...) } +// WarningHandler implements [rest.WarningHandler] and [rest.WarningHandlerWithContext] as a single function. 
type WarningHandler func(ctx context.Context, code int, agent string, text string) func (fn WarningHandler) HandleWarningHeader(code int, agent string, text string) { diff --git a/internal/controller/runtime/conversion.go b/internal/controller/runtime/conversion.go index ae4495e865..57f7938f35 100644 --- a/internal/controller/runtime/conversion.go +++ b/internal/controller/runtime/conversion.go @@ -50,7 +50,7 @@ func FromUnstructuredObject[ FromUnstructured(object.UnstructuredContent(), result) } -// ToUnstructuredList returns a copy of list by marshaling through JSON. +// ToUnstructuredList returns a copy of list using reflection. func ToUnstructuredList(list client.ObjectList) (*unstructured.UnstructuredList, error) { content, err := runtime. DefaultUnstructuredConverter. @@ -61,7 +61,7 @@ func ToUnstructuredList(list client.ObjectList) (*unstructured.UnstructuredList, return result, err } -// ToUnstructuredObject returns a copy of object by marshaling through JSON. +// ToUnstructuredObject returns a copy of object using reflection. func ToUnstructuredObject(object client.Object) (*unstructured.Unstructured, error) { content, err := runtime. DefaultUnstructuredConverter. diff --git a/internal/kubeapi/patch.go b/internal/controller/runtime/patch.go similarity index 99% rename from internal/kubeapi/patch.go rename to internal/controller/runtime/patch.go index 95bcc9a6e1..955b93e1d4 100644 --- a/internal/kubeapi/patch.go +++ b/internal/controller/runtime/patch.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package kubeapi +package runtime import ( "strings" diff --git a/internal/kubeapi/patch_test.go b/internal/controller/runtime/patch_test.go similarity index 99% rename from internal/kubeapi/patch_test.go rename to internal/controller/runtime/patch_test.go index 05bd140066..07092be068 100644 --- a/internal/kubeapi/patch_test.go +++ b/internal/controller/runtime/patch_test.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package kubeapi +package runtime import ( "encoding/json" diff --git a/internal/controller/standalone_pgadmin/apply.go b/internal/controller/standalone_pgadmin/apply.go deleted file mode 100644 index 23df91192f..0000000000 --- a/internal/controller/standalone_pgadmin/apply.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package standalone_pgadmin - -import ( - "context" - "reflect" - - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// apply sends an apply patch to object's endpoint in the Kubernetes API and -// updates object with any returned content. The fieldManager is set by -// r.Writer and the force parameter is true. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts -// -// TODO(tjmoore4): This function is duplicated from a version that takes a PostgresCluster object. -func (r *PGAdminReconciler) apply(ctx context.Context, object client.Object) error { - // Generate an apply-patch by comparing the object to its zero value. - zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() - data, err := client.MergeFrom(zero.(client.Object)).Data(object) - apply := client.RawPatch(client.Apply.Type(), data) - - // Send the apply-patch with force=true. 
- if err == nil { - err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership) - } - - return err -} diff --git a/internal/controller/standalone_pgadmin/configmap.go b/internal/controller/standalone_pgadmin/configmap.go index d2378802c3..95c0bd9be5 100644 --- a/internal/controller/standalone_pgadmin/configmap.go +++ b/internal/controller/standalone_pgadmin/configmap.go @@ -19,6 +19,7 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/collector" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -43,7 +44,7 @@ func (r *PGAdminReconciler) reconcilePGAdminConfigMap( err = errors.WithStack(r.setControllerReference(pgadmin, configmap)) } if err == nil { - err = errors.WithStack(r.apply(ctx, configmap)) + err = errors.WithStack(runtime.Apply(ctx, r.Writer, configmap)) } return configmap, err diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go index fe205dcaf6..7e3d0c8355 100644 --- a/internal/controller/standalone_pgadmin/controller.go +++ b/internal/controller/standalone_pgadmin/controller.go @@ -14,10 +14,12 @@ import ( "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" @@ -48,7 +50,7 @@ type PGAdminReconciler struct { Recorder record.EventRecorder } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={get,list,watch} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} //+kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={list,watch} //+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch} @@ -56,7 +58,7 @@ type PGAdminReconciler struct { //+kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list,watch} // ManagedReconciler creates a [PGAdminReconciler] and adds it to m. -func ManagedReconciler(m ctrl.Manager) error { +func ManagedReconciler(m manager.Manager) error { exec, err := runtime.NewPodExecutor(m.GetConfig()) kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerPGAdmin) recorder := m.GetEventRecorderFor(naming.ControllerPGAdmin) @@ -69,7 +71,7 @@ func ManagedReconciler(m ctrl.Manager) error { Writer: kubernetes, } - return errors.Join(err, ctrl.NewControllerManagedBy(m). + return errors.Join(err, builder.ControllerManagedBy(m). For(&v1beta1.PGAdmin{}). Owns(&corev1.ConfigMap{}). Owns(&corev1.PersistentVolumeClaim{}). @@ -78,39 +80,29 @@ func ManagedReconciler(m ctrl.Manager) error { Owns(&corev1.Service{}). 
Watches( v1beta1.NewPostgresCluster(), - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []reconcile.Request { return runtime.Requests(reconciler.findPGAdminsForPostgresCluster(ctx, cluster)...) }), ). Watches( &corev1.Secret{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []ctrl.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []reconcile.Request { return runtime.Requests(reconciler.findPGAdminsForSecret(ctx, client.ObjectKeyFromObject(secret))...) }), ). - Complete(reconciler)) + Complete(reconcile.AsReconciler(kubernetes, reconciler))) } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={get} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins/status",verbs={patch} -// Reconcile which aims to move the current state of the pgAdmin closer to the -// desired state described in a [v1beta1.PGAdmin] identified by request. -func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +// Reconcile moves the current state of pgAdmin closer to the state described in its specification. +func (r *PGAdminReconciler) Reconcile(ctx context.Context, pgAdmin *v1beta1.PGAdmin) (reconcile.Result, error) { var err error ctx, span := tracing.Start(ctx, "reconcile-pgadmin") log := logging.FromContext(ctx) defer span.End() - pgAdmin := &v1beta1.PGAdmin{} - if err := r.Reader.Get(ctx, req.NamespacedName, pgAdmin); err != nil { - // NotFound cannot be fixed by requeuing so ignore it. During background - // deletion, we receive delete events from pgadmin's dependents after - // pgadmin is deleted. - return ctrl.Result{}, tracing.Escape(span, client.IgnoreNotFound(err)) - } - // Write any changes to the pgadmin status on the way out. 
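For context on the removed Get and NotFound handling: a minimal sketch of the typed-reconciler pattern, in which reconcile.AsReconciler fetches the object for each request and skips reconciliation when it no longer exists. The reconciler type and controller name below are illustrative only.

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

// demoReconciler receives the fetched object directly; reconcile.AsReconciler
// performs the Get and skips reconciliation when the object is not found.
type demoReconciler struct{}

func (demoReconciler) Reconcile(ctx context.Context, pgAdmin *v1beta1.PGAdmin) (reconcile.Result, error) {
	// ... reconcile pgAdmin here ...
	return reconcile.Result{}, nil
}

func register(m manager.Manager) error {
	kubernetes := client.WithFieldOwner(m.GetClient(), "example-controller")

	return builder.ControllerManagedBy(m).
		For(&v1beta1.PGAdmin{}).
		Complete(reconcile.AsReconciler(kubernetes, demoReconciler{}))
}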
before := pgAdmin.DeepCopy() defer func() { @@ -163,7 +155,7 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct log.V(1).Info("Reconciled pgAdmin") } - return ctrl.Result{}, tracing.Escape(span, err) + return reconcile.Result{}, tracing.Escape(span, err) } // The owner reference created by controllerutil.SetControllerReference blocks diff --git a/internal/controller/standalone_pgadmin/service.go b/internal/controller/standalone_pgadmin/service.go index 8f21da4765..43835b31d6 100644 --- a/internal/controller/standalone_pgadmin/service.go +++ b/internal/controller/standalone_pgadmin/service.go @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -100,7 +101,7 @@ func (r *PGAdminReconciler) reconcilePGAdminService( return err } - return errors.WithStack(r.apply(ctx, service)) + return errors.WithStack(runtime.Apply(ctx, r.Writer, service)) } // If we get here then ServiceName was not provided through the spec diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go index 8e507acdad..a431ad5d3f 100644 --- a/internal/controller/standalone_pgadmin/statefulset.go +++ b/internal/controller/standalone_pgadmin/statefulset.go @@ -16,6 +16,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/collector" "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/util" @@ -55,7 +56,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( if err := errors.WithStack(r.setControllerReference(pgadmin, sts)); err != nil { return err } - return errors.WithStack(r.apply(ctx, sts)) + return errors.WithStack(runtime.Apply(ctx, r.Writer, sts)) } // statefulset defines the StatefulSet needed to run pgAdmin. diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go index 959437762f..e66ee43eab 100644 --- a/internal/controller/standalone_pgadmin/users.go +++ b/internal/controller/standalone_pgadmin/users.go @@ -18,6 +18,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -79,28 +80,53 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * return nil } - // If the pgAdmin version is not in the status or the image SHA has changed, get - // the pgAdmin version and store it in the status. 
- var pgadminVersion int - if pgadmin.Status.MajorVersion == 0 || pgadmin.Status.ImageSHA != pgAdminImageSha { - pgadminVersion, err = r.reconcilePGAdminMajorVersion(ctx, podExecutor) + // If the pgAdmin major or minor version is not in the status or the image + // SHA has changed, get the pgAdmin version and store it in the status. + var pgadminMajorVersion int + if pgadmin.Status.MajorVersion == 0 || pgadmin.Status.MinorVersion == "" || + pgadmin.Status.ImageSHA != pgAdminImageSha { + + // exec into the pgAdmin pod and retrieve the pgAdmin minor version + script := fmt.Sprintf(` +PGADMIN_DIR=%s +cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_VERSION)" +`, pgAdminDir) + + var stdin, stdout, stderr bytes.Buffer + + if err := podExecutor(ctx, &stdin, &stdout, &stderr, + []string{"bash", "-ceu", "--", script}...); err != nil { + return err + } + + pgadminMinorVersion := strings.TrimSpace(stdout.String()) + + // ensure minor version is valid before storing in status + parsedMinorVersion, err := strconv.ParseFloat(pgadminMinorVersion, 64) if err != nil { return err } - pgadmin.Status.MajorVersion = pgadminVersion + + // Note: "When converting a floating-point number to an integer, the + // fraction is discarded (truncation towards zero)." + // - https://go.dev/ref/spec#Conversions + pgadminMajorVersion = int(parsedMinorVersion) + + pgadmin.Status.MinorVersion = pgadminMinorVersion + pgadmin.Status.MajorVersion = pgadminMajorVersion pgadmin.Status.ImageSHA = pgAdminImageSha } else { - pgadminVersion = pgadmin.Status.MajorVersion + pgadminMajorVersion = pgadmin.Status.MajorVersion } // If the pgAdmin version is not v8 or higher, return early as user management is // only supported for pgAdmin v8 and higher. - if pgadminVersion < 8 { + if pgadminMajorVersion < 8 { // If pgAdmin version is less than v8 and user management is being attempted, // log a message clarifying that it is only supported for pgAdmin v8 and higher. if len(pgadmin.Spec.Users) > 0 { log.Info("User management is only supported for pgAdmin v8 and higher.", - "pgadminVersion", pgadminVersion) + "pgadminVersion", pgadminMajorVersion) } return err } @@ -108,25 +134,6 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * return r.writePGAdminUsers(ctx, pgadmin, podExecutor) } -// reconcilePGAdminMajorVersion execs into the pgAdmin pod and retrieves the pgAdmin major version -func (r *PGAdminReconciler) reconcilePGAdminMajorVersion(ctx context.Context, exec Executor) (int, error) { - script := fmt.Sprintf(` -PGADMIN_DIR=%s -cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)" -`, pgAdminDir) - - var stdin, stdout, stderr bytes.Buffer - - err := exec(ctx, &stdin, &stdout, &stderr, - []string{"bash", "-ceu", "--", script}...) - - if err != nil { - return 0, err - } - - return strconv.Atoi(strings.TrimSpace(stdout.String())) -} - // writePGAdminUsers takes the users in the pgAdmin spec and writes (adds or updates) their data // to both pgAdmin and the users.json file that is stored in the pgAdmin secret. If a user is // removed from the spec, its data is removed from users.json, but it is not deleted from pgAdmin. 
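To make the version handling concrete: a standalone sketch of the same parse-and-truncate step applied to the APP_VERSION output retrieved from the pod. The helper name and the sample "9.3" value are illustrative only.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseAppVersion mirrors the logic above: the pod prints config.APP_VERSION
// (for example "9.3"), which is trimmed, validated as a number, and truncated
// toward zero to obtain the major version.
func parseAppVersion(stdout string) (major int, minor string, err error) {
	minor = strings.TrimSpace(stdout)
	parsed, err := strconv.ParseFloat(minor, 64)
	if err != nil {
		return 0, "", err
	}
	return int(parsed), minor, nil
}

func main() {
	major, minor, err := parseAppVersion("9.3\n")
	fmt.Println(major, minor, err) // prints: 9 9.3 <nil>
}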
@@ -170,10 +177,25 @@ cd $PGADMIN_DIR for _, user := range existingUsersArr { existingUsersMap[user.Username] = user } + + var olderThan9_3 bool + versionFloat, err := strconv.ParseFloat(pgadmin.Status.MinorVersion, 64) + if err != nil { + return err + } + if versionFloat < 9.3 { + olderThan9_3 = true + } + intentUsers := []pgAdminUserForJson{} for _, user := range pgadmin.Spec.Users { var stdin, stdout, stderr bytes.Buffer - typeFlag := "--nonadmin" + // starting in pgAdmin 9.3, custom roles are supported and a new flag is used + // - https://github.com/pgadmin-org/pgadmin4/pull/8631 + typeFlag := "--role User" + if olderThan9_3 { + typeFlag = "--nonadmin" + } isAdmin := false if user.Role == "Administrator" { typeFlag = "--admin" @@ -229,8 +251,13 @@ cd $PGADMIN_DIR log.Error(err, "PodExec failed: ") intentUsers = append(intentUsers, existingUser) continue + + } else if strings.Contains(strings.TrimSpace(stderr.String()), "UserWarning: pkg_resources is deprecated as an API") { + // Started seeing this error with pgAdmin 9.7 when using Python 3.11. + // Issue appears to resolve with Python 3.13. + log.Info(stderr.String()) } else if strings.TrimSpace(stderr.String()) != "" { - log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py update-user error for %s: ", intentUser.Username)) intentUsers = append(intentUsers, existingUser) continue @@ -263,8 +290,12 @@ cd $PGADMIN_DIR log.Error(err, "PodExec failed: ") continue } - if strings.TrimSpace(stderr.String()) != "" { - log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + if strings.Contains(strings.TrimSpace(stderr.String()), "UserWarning: pkg_resources is deprecated as an API") { + // Started seeing this error with pgAdmin 9.7 when using Python 3.11. + // Issue appears to resolve with Python 3.13. 
+ log.Info(stderr.String()) + } else if strings.TrimSpace(stderr.String()) != "" { + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py add-user error for %s: ", intentUser.Username)) continue } @@ -297,7 +328,7 @@ cd $PGADMIN_DIR err = errors.WithStack(r.setControllerReference(pgadmin, intentUserSecret)) if err == nil { - err = errors.WithStack(r.apply(ctx, intentUserSecret)) + err = errors.WithStack(runtime.Apply(ctx, r.Writer, intentUserSecret)) } return err diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go index 5ec58dc573..47893a4feb 100644 --- a/internal/controller/standalone_pgadmin/users_test.go +++ b/internal/controller/standalone_pgadmin/users_test.go @@ -110,15 +110,16 @@ func TestReconcilePGAdminUsers(t *testing.T) { assert.Equal(t, namespace, pgadmin.Namespace) assert.Equal(t, container, naming.ContainerPGAdmin) - // Simulate a v7 version of pgAdmin by setting stdout to "7" for - // podexec call in reconcilePGAdminMajorVersion - _, _ = stdout.Write([]byte("7")) + // Simulate a v7.1 version of pgAdmin by setting stdout to "7.1" + // for podexec call in reconcilePGAdminVersion + _, _ = stdout.Write([]byte("7.1")) return nil } assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) assert.Equal(t, calls, 1, "PodExec should be called once") assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.MinorVersion, "7.1") assert.Equal(t, pgadmin.Status.ImageSHA, "fakeSHA") }) @@ -145,78 +146,89 @@ func TestReconcilePGAdminUsers(t *testing.T) { ) error { calls++ - // Simulate a v7 version of pgAdmin by setting stdout to "7" for - // podexec call in reconcilePGAdminMajorVersion - _, _ = stdout.Write([]byte("7")) + // Simulate a v7.1 version of pgAdmin by setting stdout to "7.1" + // for podexec call in reconcilePGAdminVersion + _, _ = stdout.Write([]byte("7.1")) return nil } assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) assert.Equal(t, calls, 1, "PodExec should be called once") assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.MinorVersion, "7.1") assert.Equal(t, pgadmin.Status.ImageSHA, "newFakeSHA") }) -} -func TestReconcilePGAdminMajorVersion(t *testing.T) { - ctx := context.Background() - pod := corev1.Pod{} - pod.Namespace = "test-namespace" - pod.Name = "pgadmin-123-0" - reconciler := &PGAdminReconciler{} + t.Run("PodHealthyBadVersion", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pod := pod.DeepCopy() - podExecutor := func( - ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, - ) error { - return reconciler.PodExec(ctx, pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) 
- } + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "fakeSHA" - t.Run("SuccessfulRetrieval", func(t *testing.T) { - reconciler.PodExec = func( + r := new(PGAdminReconciler) + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { + calls++ + assert.Equal(t, pod, "pgadmin-123-0") - assert.Equal(t, namespace, "test-namespace") + assert.Equal(t, namespace, pgadmin.Namespace) assert.Equal(t, container, naming.ContainerPGAdmin) - // Simulate a v7 version of pgAdmin by setting stdout to "7" for - // podexec call in reconcilePGAdminMajorVersion - _, _ = stdout.Write([]byte("7")) + // set expected version to something completely wrong + _, _ = stdout.Write([]byte("woot")) return nil } - version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) - assert.NilError(t, err) - assert.Equal(t, version, 7) + assert.ErrorContains(t, r.reconcilePGAdminUsers(ctx, pgadmin), "strconv.ParseFloat: parsing \"woot\": invalid syntax") + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 0) + assert.Equal(t, pgadmin.Status.MinorVersion, "") + assert.Equal(t, pgadmin.Status.ImageSHA, "") }) - t.Run("FailedRetrieval", func(t *testing.T) { - reconciler.PodExec = func( - ctx context.Context, namespace, pod, container string, - stdin io.Reader, stdout, stderr io.Writer, command ...string, - ) error { - // Simulate the python call giving bad data (not a version int) - _, _ = stdout.Write([]byte("asdfjkl;")) - return nil - } + t.Run("PodExecError", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pod := pod.DeepCopy() - version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) - assert.Check(t, err != nil) - assert.Equal(t, version, 0) - }) + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "fakeSHA" - t.Run("PodExecError", func(t *testing.T) { - reconciler.PodExec = func( + r := new(PGAdminReconciler) + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { + calls++ + + assert.Equal(t, pod, "pgadmin-123-0") + assert.Equal(t, namespace, pgadmin.Namespace) + assert.Equal(t, container, naming.ContainerPGAdmin) + return errors.New("PodExecError") } - version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) - assert.Check(t, err != nil) - assert.Equal(t, version, 0) + assert.Error(t, r.reconcilePGAdminUsers(ctx, pgadmin), "PodExecError") + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 0) + assert.Equal(t, pgadmin.Status.MinorVersion, "") + assert.Equal(t, pgadmin.Status.ImageSHA, "") }) } @@ -244,6 +256,14 @@ func TestWritePGAdminUsers(t *testing.T) { }`) assert.NilError(t, cc.Create(ctx, pgadmin)) + // fake the status so that the correct commands will be used when creating + // users. 
+ pgadmin.Status = v1beta1.PGAdminStatus{ + ImageSHA: "fakesha", + MajorVersion: 9, + MinorVersion: "9.3", + } + userPasswordSecret1 := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "user-password-secret1", diff --git a/internal/controller/standalone_pgadmin/volume.go b/internal/controller/standalone_pgadmin/volume.go index a3e26682ef..a4d0a5e13d 100644 --- a/internal/controller/standalone_pgadmin/volume.go +++ b/internal/controller/standalone_pgadmin/volume.go @@ -14,6 +14,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -32,7 +33,7 @@ func (r *PGAdminReconciler) reconcilePGAdminDataVolume( if err == nil { err = r.handlePersistentVolumeClaimError(pgadmin, - errors.WithStack(r.apply(ctx, pvc))) + errors.WithStack(runtime.Apply(ctx, r.Writer, pvc))) } return pvc, err diff --git a/internal/crd/post-process.go b/internal/crd/post-process.go index 5aac230897..3117e16ac0 100644 --- a/internal/crd/post-process.go +++ b/internal/crd/post-process.go @@ -12,6 +12,7 @@ import ( "log/slog" "os" "path/filepath" + "regexp" "github.com/itchyny/gojq" "sigs.k8s.io/yaml" @@ -44,8 +45,12 @@ func main() { panic(err) } + // Turn top-level strings that start with octothorpe U+0023 into YAML comments by removing their quotes. + yamlData := need(yaml.Marshal(v)) + yamlData = regexp.MustCompile(`(?m)^'(#[^']*)'(.*)$`).ReplaceAll(yamlData, []byte("$1$2")) + slog.Info("Writing", "file", yamlName) - must(os.WriteFile(yamlPath, append([]byte("---\n"), need(yaml.Marshal(v))...), 0o644)) + must(os.WriteFile(yamlPath, append([]byte("---\n"), yamlData...), 0o644)) } if _, ok := result.Next(); ok { diff --git a/internal/crd/post-process.jq b/internal/crd/post-process.jq index 935ab09a88..fccf0a9d73 100644 --- a/internal/crd/post-process.jq +++ b/internal/crd/post-process.jq @@ -3,6 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 # # This [jq] filter modifies a Kubernetes CustomResourceDefinition. +# Use the controller-gen "+kubebuilder:title" marker to identify schemas that need special manipulation. # # [jq]: https://jqlang.org @@ -10,14 +11,32 @@ # https://jqlang.org/manual#multiplication-division-modulo def merge(stream): reduce stream as $i ({}; . * $i); +# https://pkg.go.dev/k8s.io/api/core/v1#ImageVolumeSource +reduce paths(try .title == "$corev1.ImageVolumeSource") as $path (.; + getpath($path) as $schema | + setpath($path; $schema * { + required: (["reference"] + ($schema.required // []) | sort), + properties: { + pullPolicy: { enum: ["Always", "Never", "IfNotPresent"] }, + reference: { minLength: 1 } + } + } | del(.title)) +) | + +# Kubernetes assumes the evaluation cost of an enum value is very large: https://issue.k8s.io/119511 +# Look at every schema that has a populated "enum" property. 
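In other words, the reduce that follows copies the length of the longest allowed value into maxLength, so the CEL cost estimator has a bound to work with instead of assuming an unbounded string. A quick check of what that yields for the pullPolicy enum added above:

package main

import "fmt"

func main() {
	// Same computation as ($schema.enum | map(length) | max) in the reduce
	// below, applied to the pullPolicy values declared above.
	enum := []string{"Always", "Never", "IfNotPresent"}

	maxLength := 0
	for _, value := range enum {
		if len(value) > maxLength {
			maxLength = len(value)
		}
	}

	fmt.Println(maxLength) // 12, so the string schema gains maxLength: 12
}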
+reduce paths(try .enum | length > 0) as $path (.; + getpath($path) as $schema | + setpath($path; $schema + { maxLength: ($schema.enum | map(length) | max) }) +) | + # Kubernetes does not consider "allOf" when estimating CEL cost: https://issue.k8s.io/134029 # controller-gen might produce "allOf" when combining markers: # https://github.com/kubernetes-sigs/controller-tools/issues/1270 # # This (partially) addresses both by keeping only the smallest max, largest min, etc. -# -# Look at every schema that has an "allOf" property. -reduce paths(try .allOf) as $path (.; +# Look at every schema that has a populated "allOf" property. +reduce paths(try .allOf | length > 0) as $path (.; ( getpath($path) | merge( ., @@ -58,4 +77,22 @@ reduce paths(try .["x-kubernetes-int-or-string"] == true) as $path (.; end ) | +# Rename Kubebuilder annotations and move them to the top-level. +# The caller can turn these into YAML comments. +. += (.metadata.annotations | with_entries(select(.key | startswith("controller-gen.kubebuilder")) | .key = "# \(.key)")) | +.metadata.annotations |= with_entries(select(.key | startswith("controller-gen.kubebuilder") | not)) | + +# Remove nulls and empty objects from metadata. +# Some very old generators would set a null creationTimestamp. +# +# https://github.com/kubernetes-sigs/controller-tools/issues/402 +# https://issue.k8s.io/67610 +del(.metadata | .. | select(length == 0)) | + +# Remove status to avoid conflicts with the CRD controller. +# Some very old generators would set this field. +# +# https://github.com/kubernetes-sigs/controller-tools/issues/456 +del(.status) | + . diff --git a/internal/testing/validation/pgadmin_test.go b/internal/crd/validation/pgadmin_test.go similarity index 100% rename from internal/testing/validation/pgadmin_test.go rename to internal/crd/validation/pgadmin_test.go diff --git a/internal/crd/validation/pgbackrest_test.go b/internal/crd/validation/pgbackrest_test.go new file mode 100644 index 0000000000..6226967057 --- /dev/null +++ b/internal/crd/validation/pgbackrest_test.go @@ -0,0 +1,310 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
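The comment conversion above works in two steps: the jq filter copies each controller-gen.kubebuilder annotation to a top-level key prefixed with "# ", and post-process.go then strips the quotes that yaml.Marshal places around such a key, leaving a plain YAML comment. A small sketch of the second step, reusing the same regular expression on made-up input:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// yaml.Marshal quotes a key that starts with '#'; this sample line stands
	// in for that output (the annotation value "(devel)" is made up).
	yamlData := []byte("'# controller-gen.kubebuilder.io/version': (devel)\napiVersion: apiextensions.k8s.io/v1\n")

	// The same expression post-process.go applies before writing the file.
	uncomment := regexp.MustCompile(`(?m)^'(#[^']*)'(.*)$`)
	fmt.Print(string(uncomment.ReplaceAll(yamlData, []byte("$1$2"))))

	// Output:
	// # controller-gen.kubebuilder.io/version: (devel)
	// apiVersion: apiextensions.k8s.io/v1
}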
+// +// SPDX-License-Identifier: Apache-2.0 + +package validation + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" + v1 "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1" +) + +func TestV1PGBackRestLogging(t *testing.T) { + ctx := context.Background() + cc := require.Kubernetes(t) + t.Parallel() + + namespace := require.Namespace(t, cc) + + base := v1.NewPostgresCluster() + base.Namespace = namespace.Name + base.Name = "pgbackrest-logging" + // required fields + require.UnmarshalInto(t, &base.Spec, `{ + postgresVersion: 16, + instances: [{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }], + backups: { + pgbackrest: { + repos: [{ + name: repo1, + }] + }, + }, + }`) + + assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), + "expected this base to be valid") + + t.Run("Cannot set log-path via global", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + global: { + log-path: "/anything" + } + }`) + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "pgbackrest log-path must be set via the various log.path fields in the spec") + }) + + t.Run("Cannot set pgbackrest sidecar's log.path without correct subdir", func(t *testing.T) { + tmp := base.DeepCopy() + + t.Run("Wrong subdir", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/something/wrong" + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "pgbackrest sidecar log path is restricted to an existing additional volume") + }) + + t.Run("Single instance - missing additional volume", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`) + }) + + t.Run("Multiple instances - one missing additional volume", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + },{ + name: "instance2", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }]`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`) + }) + + t.Run("Single instance - additional volume present", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } 
}, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + }]`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), "expected log.path to be valid") + }) + + t.Run("Multiple instances - additional volume present but not matching path", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + },{ + name: "instance2", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "another", + claimName: "another-pvc-claim" + }] + } + }]`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`) + }) + + t.Run("Multiple instances - additional volumes present and matching log path", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + },{ + name: "instance2", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "another-pvc-claim" + }] + } + }]`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), "expected log.path to be valid") + }) + }) + + t.Run("Cannot set logging on volumes that don't exist", func(t *testing.T) { + t.Run("Repo Host", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + repoHost: { + log: { + path: "/volumes/wrong" + }, + volumes: { + additional: [ + { + name: logging, + claimName: required-1 + }] + } + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "repo host log path is restricted to an existing additional volume") + }) + + t.Run("Backup Jobs", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + jobs: { + log: { + path: "/volumes/wrong" + }, + volumes: { + additional: [ + { + name: logging, + claimName: required-1 + }] + } + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "backup jobs log path is restricted to an existing additional volume") + }) + }) + + t.Run("Can set logging on volumes that do exist", func(t *testing.T) { + t.Run("Repo Host", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + repoHost: { + log: { + path: "/volumes/logging/logs" + }, + volumes: { + additional: [ + { + name: logging, + claimName: required-1 + }] + } + } + }`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), + "expected this configuration to be valid") + }) + + t.Run("Backup Jobs", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, 
&tmp.Spec.Backups.PGBackRest, `{ + jobs: { + log: { + path: "/volumes/logging/logs" + }, + volumes: { + additional: [ + { + name: logging, + claimName: required-1 + }] + } + } + }`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), + "expected this configuration to be valid") + }) + }) +} diff --git a/internal/testing/validation/pgbouncer_test.go b/internal/crd/validation/pgbouncer_test.go similarity index 100% rename from internal/testing/validation/pgbouncer_test.go rename to internal/crd/validation/pgbouncer_test.go diff --git a/internal/testing/validation/postgrescluster/postgres_authentication_test.go b/internal/crd/validation/postgrescluster/postgres_authentication_test.go similarity index 100% rename from internal/testing/validation/postgrescluster/postgres_authentication_test.go rename to internal/crd/validation/postgrescluster/postgres_authentication_test.go diff --git a/internal/testing/validation/postgrescluster/postgres_config_test.go b/internal/crd/validation/postgrescluster/postgres_config_test.go similarity index 95% rename from internal/testing/validation/postgrescluster/postgres_config_test.go rename to internal/crd/validation/postgrescluster/postgres_config_test.go index d9529a8d0f..5a636ac439 100644 --- a/internal/testing/validation/postgrescluster/postgres_config_test.go +++ b/internal/crd/validation/postgrescluster/postgres_config_test.go @@ -226,25 +226,31 @@ func TestPostgresConfigParametersV1(t *testing.T) { message: `"/pgwal/logs/postgres"`, }, - // Directories inside /volumes are acceptable, but every instance set needs additional volumes. - // - // TODO(validation): This could be more precise and check the directory name of each additional - // volume, but Kubernetes 1.33 incorrectly estimates the cost of volume.name: - // https://github.com/kubernetes-sigs/controller-tools/pull/1270#issuecomment-3272211184 + // Directories inside /volumes are acceptable, but every instance set needs the correct additional volume. 
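The cases above and below all exercise the same constraint: roughly, a log or parameter path under /volumes is only accepted when its first directory names an additional volume that the relevant pods actually mount, and for instance sets every set must mount it. A rough Go model of that check, inferred from the expected error messages rather than copied from the CRD schema:

package main

import (
	"fmt"
	"slices"
	"strings"
)

// instanceSet stands in for an instance set in the spec; only the names of
// its additional volumes matter here, and every name below is made up.
type instanceSet struct {
	name    string
	volumes []string
}

// volumesPathAccepted models the rule: the path must sit under
// /volumes/<name>, and <name> must be an additional volume on every instance set.
func volumesPathAccepted(path string, sets []instanceSet) bool {
	rest, ok := strings.CutPrefix(path, "/volumes/")
	if !ok {
		return false
	}
	name, _, _ := strings.Cut(rest, "/")
	if name == "" {
		return false
	}
	for _, set := range sets {
		if !slices.Contains(set.volumes, name) {
			return false // "all instances need an additional volume ..."
		}
	}
	return true
}

func main() {
	one := instanceSet{name: "one", volumes: []string{"yep"}}
	two := instanceSet{name: "two", volumes: []string{"diff"}}

	fmt.Println(volumesPathAccepted("/volumes/yep", []instanceSet{one}))      // true
	fmt.Println(volumesPathAccepted("/volumes/yep", []instanceSet{one, two})) // false
	fmt.Println(volumesPathAccepted("/something/else", []instanceSet{one}))   // false
}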
{ - name: "two instance sets and two additional volumes", - value: "/volumes/anything", + name: "two instance sets and two correct additional volumes", + value: "/volumes/yep", instances: `[ - { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: dir, claimName: a }] } }, - { name: two, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: dir, claimName: b }] } }, + { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: yep, claimName: a }] } }, + { name: two, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: yep, claimName: b }] } }, ]`, valid: true, }, + { + name: "two instance sets and one correct additional volume", + value: "/volumes/yep", + instances: `[ + { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: yep, claimName: a }] } }, + { name: two, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: diff, claimName: b }] } }, + ]`, + valid: false, + message: `all instances need an additional volume`, + }, { name: "two instance sets and one additional volume", - value: "/volumes/anything", + value: "/volumes/yep", instances: `[ - { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: dir, claimName: a }] } }, + { name: one, dataVolumeClaimSpec: ` + volume + `, volumes: { additional: [{ name: yep, claimName: a }] } }, { name: two, dataVolumeClaimSpec: ` + volume + ` }, ]`, valid: false, @@ -252,7 +258,7 @@ func TestPostgresConfigParametersV1(t *testing.T) { }, { name: "two instance sets and no additional volumes", - value: "/volumes/anything", + value: "/volumes/yep", instances: `[ { name: one, dataVolumeClaimSpec: ` + volume + ` }, { name: two, dataVolumeClaimSpec: ` + volume + ` }, diff --git a/internal/testing/validation/postgrescluster/postgres_users_test.go b/internal/crd/validation/postgrescluster/postgres_users_test.go similarity index 100% rename from internal/testing/validation/postgrescluster/postgres_users_test.go rename to internal/crd/validation/postgrescluster/postgres_users_test.go diff --git a/internal/crd/validation/postgrescluster_test.go b/internal/crd/validation/postgrescluster_test.go new file mode 100644 index 0000000000..e491c47d25 --- /dev/null +++ b/internal/crd/validation/postgrescluster_test.go @@ -0,0 +1,294 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package validation + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" + v1 "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestPostgresUserInterfaceAcrossVersions(t *testing.T) { + ctx := context.Background() + cc := require.Kubernetes(t) + t.Parallel() + + namespace := require.Namespace(t, cc) + + base := v1beta1.NewPostgresCluster() + // Start with a bunch of required fields. 
+ base.Namespace = namespace.Name + base.Name = "postgres-pgadmin" + require.UnmarshalInto(t, &base.Spec, `{ + userInterface: { + pgAdmin: { + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }, + }, + postgresVersion: 16, + instances: [{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }], + }`) + + v1base := v1.NewPostgresCluster() + // Start with a bunch of required fields. + v1base.Namespace = namespace.Name + v1base.Name = "postgres-pgadmin" + require.UnmarshalInto(t, &v1base.Spec, `{ + userInterface: { + pgAdmin: { + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }, + }, + postgresVersion: 16, + instances: [{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }], + }`) + + t.Run("v1beta1 is valid with pgadmin", func(t *testing.T) { + assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), + "expected this base cluster to be valid") + }) + t.Run("v1 is invalid with pgadmin", func(t *testing.T) { + assert.ErrorContains(t, cc.Create(ctx, v1base.DeepCopy(), client.DryRunAll), + "userInterface not available in v1") + }) + + t.Run("v1 is valid with pgadmin but only if unchanged from v1beta1", func(t *testing.T) { + // Validation ratcheting is enabled starting in Kubernetes 1.30 + require.KubernetesAtLeast(t, "1.30") + + // A v1 that has been updated from a v1beta1 with no change to the userInterface is valid + assert.NilError(t, cc.Create(ctx, base), + "expected this base cluster to be valid") + v1base.ResourceVersion = base.ResourceVersion + assert.NilError(t, cc.Update(ctx, v1base), + "expected this v1 cluster to be a valid update") + + // But will not be valid if there's a change to the userInterface + require.UnmarshalInto(t, &v1base.Spec, `{ + userInterface: { + pgAdmin: { + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce, ReadWriteMany], + resources: { requests: { storage: 2Mi } }, + }, + }, + }, + }`) + + assert.ErrorContains(t, cc.Update(ctx, v1base), + "userInterface not available in v1") + }) +} + +func TestAdditionalVolumes(t *testing.T) { + ctx := context.Background() + cc := require.KubernetesAtLeast(t, "1.30") + dryrun := client.NewDryRunClient(cc) + t.Parallel() + + namespace := require.Namespace(t, cc) + base := v1beta1.NewPostgresCluster() + + base.Namespace = namespace.Name + base.Name = "image-volume-source-test" + // required fields + require.UnmarshalInto(t, &base.Spec, `{ + postgresVersion: 16, + instances: [{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }], + }`) + + assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), + "expected this base to be valid") + + var unstructuredBase unstructured.Unstructured + require.UnmarshalInto(t, &unstructuredBase, require.Value(yaml.Marshal(base))) + + t.Run("Cannot set both image and claimName", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + require.UnmarshalIntoField(t, tmp, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim", + image: { + reference: "test-image", + pullPolicy: Always + }, + readOnly: true + }] + } + }]`, "spec", "instances") + + err := dryrun.Create(ctx, tmp.DeepCopy()) + assert.Assert(t, apierrors.IsInvalid(err)) + + details := 
require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 1)) + assert.Equal(t, details.Causes[0].Field, "spec.instances[0].volumes.additional[0]") + assert.ErrorContains(t, err, "you must set only one of image or claimName") + }) + + t.Run("Cannot set readOnly to false when using image volume", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + require.UnmarshalIntoField(t, tmp, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + image: { + reference: "test-image", + pullPolicy: Always + }, + readOnly: false + }] + } + }]`, "spec", "instances") + + err := dryrun.Create(ctx, tmp.DeepCopy()) + assert.Assert(t, apierrors.IsInvalid(err)) + + details := require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 1)) + assert.Equal(t, details.Causes[0].Field, "spec.instances[0].volumes.additional[0]") + assert.ErrorContains(t, err, "image volumes must be readOnly") + }) + + t.Run("Reference must be set when using image volume", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + require.UnmarshalIntoField(t, tmp, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + image: { + pullPolicy: Always + }, + readOnly: true + }] + } + }]`, "spec", "instances") + + err := dryrun.Create(ctx, tmp.DeepCopy()) + assert.Assert(t, apierrors.IsInvalid(err)) + + details := require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 2)) + assert.Assert(t, cmp.Equal(details.Causes[0].Field, "spec.instances[0].volumes.additional[0].image.reference")) + assert.Assert(t, cmp.Equal(details.Causes[0].Type, "FieldValueRequired")) + assert.ErrorContains(t, err, "Required") + }) + + t.Run("Reference cannot be an empty string when using image volume", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + require.UnmarshalIntoField(t, tmp, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + image: { + reference: "", + pullPolicy: Always + }, + readOnly: true + }] + } + }]`, "spec", "instances") + + err := dryrun.Create(ctx, tmp.DeepCopy()) + assert.Assert(t, apierrors.IsInvalid(err)) + + details := require.StatusErrorDetails(t, err) + assert.Assert(t, cmp.Len(details.Causes, 1)) + assert.Assert(t, cmp.Equal(details.Causes[0].Field, "spec.instances[0].volumes.additional[0].image.reference")) + assert.Assert(t, cmp.Equal(details.Causes[0].Type, "FieldValueInvalid")) + assert.ErrorContains(t, err, "at least 1 chars long") + }) + + t.Run("ReadOnly can be omitted or set true when using image volume", func(t *testing.T) { + tmp := unstructuredBase.DeepCopy() + + require.UnmarshalIntoField(t, tmp, `[{ + name: "test-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + image: { + reference: "test-image", + pullPolicy: Always + }, + }] + } + }, { + name: "another-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "another", + image: { + reference: "another-image", + pullPolicy: Always + }, + readOnly: true + }] + } + }]`, "spec", "instances") + assert.NilError(t, dryrun.Create(ctx, tmp.DeepCopy())) + }) +} diff --git 
a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index 090f119d1c..f411491fc8 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "path" - "strconv" "strings" "time" @@ -71,8 +70,8 @@ const ( // CreatePGBackRestConfigMapIntent creates a configmap struct with pgBackRest pgbackrest.conf settings in the data field. // The keys within the data field correspond to the use of that configuration. -// pgbackrest_job.conf is used by certain jobs, such as stanza create and backup -// pgbackrest_primary.conf is used by the primary database pod +// pgbackrest-server.conf is used by the pgBackRest TLS server +// pgbackrest_instance.conf is used by the primary database pod // pgbackrest_repo.conf is used by the pgBackRest repository pod // pgbackrest_cloud.conf is used by cloud repo backup jobs func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, @@ -109,9 +108,10 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet populatePGInstanceConfigurationMap( serviceName, serviceNamespace, repoHostName, pgdataDir, config.FetchKeyCommand(&postgresCluster.Spec), - strconv.Itoa(postgresCluster.Spec.PostgresVersion), + fmt.Sprint(postgresCluster.Spec.PostgresVersion), pgPort, postgresCluster.Spec.Backups.PGBackRest.Repos, postgresCluster.Spec.Backups.PGBackRest.Global, + util.GetPGBackRestLogPathForInstance(postgresCluster), ).String() // As the cluster transitions from having a repository host to having none, @@ -122,34 +122,31 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet serverConfig(postgresCluster).String() if RepoHostVolumeDefined(postgresCluster) && repoHostName != "" { + // Get pgbackrest log path for repo host pod + pgBackRestLogPath := generateRepoHostLogPath(postgresCluster) + cm.Data[CMRepoKey] = iniGeneratedWarning + populateRepoHostConfigurationMap( serviceName, serviceNamespace, pgdataDir, config.FetchKeyCommand(&postgresCluster.Spec), - strconv.Itoa(postgresCluster.Spec.PostgresVersion), + fmt.Sprint(postgresCluster.Spec.PostgresVersion), pgPort, instanceNames, postgresCluster.Spec.Backups.PGBackRest.Repos, postgresCluster.Spec.Backups.PGBackRest.Global, + pgBackRestLogPath, ).String() if collector.OpenTelemetryLogsOrMetricsEnabled(ctx, postgresCluster) { - err = collector.AddToConfigMap(ctx, collector.NewConfigForPgBackrestRepoHostPod( ctx, postgresCluster.Spec.Instrumentation, postgresCluster.Spec.Backups.PGBackRest.Repos, + pgBackRestLogPath, ), cm) // If OTel logging is enabled, add logrotate config for the RepoHost if err == nil && collector.OpenTelemetryLogsEnabled(ctx, postgresCluster) { - var pgBackRestLogPath string - for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { - if repo.Volume != nil { - pgBackRestLogPath = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) - break - } - } collector.AddLogrotateConfigs(ctx, postgresCluster.Spec.Instrumentation, cm, []collector.LogrotateConfig{{ LogFiles: []string{pgBackRestLogPath + "/*.log"}, @@ -163,7 +160,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet populateCloudRepoConfigurationMap( serviceName, serviceNamespace, pgdataDir, config.FetchKeyCommand(&postgresCluster.Spec), - strconv.Itoa(postgresCluster.Spec.PostgresVersion), + fmt.Sprint(postgresCluster.Spec.PostgresVersion), cloudLogPath, pgPort, instanceNames, postgresCluster.Spec.Backups.PGBackRest.Repos, 
postgresCluster.Spec.Backups.PGBackRest.Global, @@ -180,13 +177,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, cluster *v1beta1.PostgresCluster) string { - var pgBackRestLogPath string - for _, repo := range cluster.Spec.Backups.PGBackRest.Repos { - if repo.Volume != nil { - pgBackRestLogPath = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) - break - } - } + pgBackRestLogPath := generateRepoHostLogPath(cluster) container := corev1.Container{ // TODO(log-rotation): The second argument here should be the path @@ -221,10 +212,9 @@ func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, // - Renames the data directory as needed to bootstrap the cluster using the restored database. // This ensures compatibility with the "existing" bootstrap method that is included in the // Patroni config when bootstrapping a cluster using an existing data directory. -func RestoreCommand(pgdata, hugePagesSetting, fetchKeyCommand string, _ []*corev1.PersistentVolumeClaim, args ...string) []string { - ps := postgres.NewParameterSet() +func RestoreCommand(postgresVersion int32, pgdata string, params *postgres.ParameterSet, args ...string) []string { + ps := params.DeepCopy() ps.Add("data_directory", pgdata) - ps.Add("huge_pages", hugePagesSetting) // Keep history and WAL files until the cluster starts with its normal // archiving enabled. @@ -235,10 +225,6 @@ func RestoreCommand(pgdata, hugePagesSetting, fetchKeyCommand string, _ []*corev // progress during recovery. ps.Add("hot_standby", "on") - if fetchKeyCommand != "" { - ps.Add("encryption_key_command", fetchKeyCommand) - } - configure := strings.Join([]string{ // With "hot_standby" on, some parameters cannot be smaller than they were // when Postgres was backed up. Configure these to match values reported by @@ -280,6 +266,7 @@ func RestoreCommand(pgdata, hugePagesSetting, fetchKeyCommand string, _ []*corev script := strings.Join([]string{ `declare -r PGDATA="$1" opts="$2"; export PGDATA PGHOST`, + postgres.ShellPath(postgresVersion), // Remove any "postmaster.pid" file leftover from a prior failure. `rm -f "${PGDATA}/postmaster.pid"`, @@ -380,7 +367,7 @@ func populatePGInstanceConfigurationMap( serviceName, serviceNamespace, repoHostName, pgdataDir, fetchKeyCommand, postgresVersion string, pgPort int32, repos []v1beta1.PGBackRestRepo, - globalConfig map[string]string, + globalConfig map[string]string, pgBackRestLogPath string, ) iniSectionSet { // TODO(cbandy): pass a FQDN in already. @@ -396,7 +383,7 @@ func populatePGInstanceConfigurationMap( // pgBackRest spool-path should always be co-located with the Postgres WAL path. 
global.Set("spool-path", "/pgdata/pgbackrest-spool") // pgBackRest will log to the pgData volume for commands run on the PostgreSQL instance - global.Set("log-path", naming.PGBackRestPGDataLogPath) + global.Set("log-path", pgBackRestLogPath) for _, repo := range repos { global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) @@ -450,13 +437,12 @@ func populateRepoHostConfigurationMap( serviceName, serviceNamespace, pgdataDir, fetchKeyCommand, postgresVersion string, pgPort int32, pgHosts []string, repos []v1beta1.PGBackRestRepo, - globalConfig map[string]string, + globalConfig map[string]string, logPath string, ) iniSectionSet { global := iniMultiSet{} stanza := iniMultiSet{} - var pgBackRestLogPathSet bool for _, repo := range repos { global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) @@ -468,20 +454,14 @@ func populateRepoHostConfigurationMap( global.Set(option, val) } } - - if !pgBackRestLogPathSet && repo.Volume != nil { - // pgBackRest will log to the first configured repo volume when commands - // are run on the pgBackRest repo host. With our previous check in - // RepoHostVolumeDefined(), we've already validated that at least one - // defined repo has a volume. - global.Set("log-path", fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name)) - pgBackRestLogPathSet = true - } } - // If no log path was set, don't log because the default path is not writable. - if !pgBackRestLogPathSet { + // If no log path was provided, don't log because the default path is not writable. + // Otherwise, set the log-path. + if logPath == "" { global.Set("log-level-file", "off") + } else { + global.Set("log-path", logPath) } for option, val := range globalConfig { @@ -818,3 +798,24 @@ func serverConfig(cluster *v1beta1.PostgresCluster) iniSectionSet { "global:server": server, } } + +// generateRepoHostLogPath takes a postgrescluster and returns the log path that +// should be used by pgbackrest in the Repo Host Pod based on the repos specified +// and whether the user has specified a log path. +// +// This function assumes that the backups/pgbackrest spec is present in cluster. +func generateRepoHostLogPath(cluster *v1beta1.PostgresCluster) string { + for _, repo := range cluster.Spec.Backups.PGBackRest.Repos { + if repo.Volume != nil { + // If the user has set a log path in the spec, use it. 
+ // Otherwise, default to /pgbackrest/repo#/log + if cluster.Spec.Backups.PGBackRest.RepoHost != nil && + cluster.Spec.Backups.PGBackRest.RepoHost.Log != nil && + cluster.Spec.Backups.PGBackRest.RepoHost.Log.Path != "" { + return cluster.Spec.Backups.PGBackRest.RepoHost.Log.Path + } + return fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) + } + } + return "" +} diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index 4617b3a80a..e6ca0b2a76 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -18,6 +18,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -451,6 +452,104 @@ pg1-socket-path = /tmp/postgres `, "\t\n")+"\n") }) + t.Run("LoggingToAdditionalVolume", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.UID = "guitar" + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + cluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test", + }, + Volumes: &v1beta1.PGBackRestVolumesSpec{ + Additional: []v1beta1.AdditionalVolume{ + { + ClaimName: "some-pvc", + Name: "test", + }, + }, + }, + } + + configmap, err := CreatePGBackRestConfigMapIntent(context.Background(), cluster, + "repo-hostname", "anumber", "pod-service-name", "test-ns", "", + []string{"some-instance"}) + + assert.NilError(t, err) + assert.DeepEqual(t, configmap.Annotations, map[string]string{}) + assert.DeepEqual(t, configmap.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", + }) + + assert.Equal(t, configmap.Data["config-hash"], "anumber") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@guitar=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +repo1-host = repo-hostname-0.pod-service-name.test-ns.svc.`+domain+` +repo1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +repo1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +repo1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +repo1-host-type = tls +repo1-host-user = postgres +repo1-path = /pgbackrest/repo1 +spool-path = /pgdata/pgbackrest-spool + +[db] +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +log-path = /volumes/test +repo1-path = /pgbackrest/repo1 + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], "") + }) + t.Run("CustomMetadata", func(t *testing.T) { cluster := cluster.DeepCopy() cluster.Spec.Metadata = &v1beta1.Metadata{ @@ -691,15 +790,14 @@ func TestReloadCommandPrettyYAML(t *testing.T) { func TestRestoreCommand(t *testing.T) { shellcheck := require.ShellCheck(t) - pgdata := "/pgdata/pg13" - opts := []string{ - "--stanza=" + DefaultStanzaName, "--pg1-path=" + pgdata, - "--repo=1"} - command := RestoreCommand(pgdata, "try", "", nil, strings.Join(opts, " ")) + command := RestoreCommand(19, "/pgdata/pg13", postgres.NewParameterSet(), "--repo=1") assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) assert.Assert(t, len(command) > 3) + assert.Assert(t, cmp.Contains(command[3], "/usr/pgsql-19/bin"), + "expected path to PostgreSQL binaries") + dir := t.TempDir() file := filepath.Join(dir, "script.bash") assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) @@ -712,17 +810,20 @@ func TestRestoreCommand(t *testing.T) { func TestRestoreCommandPrettyYAML(t *testing.T) { assert.Assert(t, cmp.MarshalContains( - RestoreCommand("/dir", "try", "", nil, "--options"), + RestoreCommand(9, "/dir", postgres.NewParameterSet(), "--options"), "\n- |", ), "expected literal block scalar") } func TestRestoreCommandTDE(t *testing.T) { + params := postgres.NewParameterSet() + params.Add("encryption_key_command", "whatever") + assert.Assert(t, cmp.MarshalContains( - RestoreCommand("/dir", "try", "echo testValue", nil, "--options"), - "encryption_key_command = 'echo testValue'", + RestoreCommand(20, "/dir", params, "--options"), + "encryption_key_command = 'whatever'", ), "expected encryption_key_command setting") } @@ -799,3 +900,89 @@ log-level-stderr = error log-timestamp = n `) } + +func TestGenerateRepoHostLogPath(t *testing.T) { + cluster := v1beta1.PostgresCluster{} + cluster.Namespace = "ns1" + cluster.Name = "hippo-dance" + + cluster.Spec.Port = initialize.Int32(2345) + cluster.Spec.PostgresVersion = 12 + + cluster.Spec.Backups = v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{}, + } + + t.Run("NoReposNoRepoHost", func(t *testing.T) { + cluster := cluster.DeepCopy() + assert.Equal(t, generateRepoHostLogPath(cluster), "") + }) + + t.Run("NoVolumeRepo", func(t *testing.T) { + cluster := 
cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: &v1beta1.RepoGCS{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "") + }) + + t.Run("OneVolumeRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/pgbackrest/repo1/log") + }) + + t.Run("TwoVolumeRepos", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + { + Name: "repo2", + Volume: &v1beta1.RepoPVC{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/pgbackrest/repo1/log") + }) + + t.Run("VolumeRepoNotFirst", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: &v1beta1.RepoGCS{}, + }, + { + Name: "repo2", + Volume: &v1beta1.RepoPVC{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/pgbackrest/repo2/log") + }) + + t.Run("LogPathSpecified", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + cluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/some/directory", + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/some/directory") + }) +} diff --git a/internal/postgres/config.go b/internal/postgres/config.go index 1e9f52a7e7..ebefc9dd6c 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -264,6 +264,17 @@ func Environment(cluster *v1beta1.PostgresCluster) []corev1.EnvVar { } } +// ShellPath returns a POSIX shell command that prepends typical Postgres executable paths to the PATH variable. +func ShellPath(postgresVersion int32) string { + return fmt.Sprintf(`PATH="`+ + strings.Join([]string{ + `/usr/lib/postgresql/%[1]d/bin`, // Debian + `/usr/libexec/postgresql%[1]d`, // Alpine + `/usr/pgsql-%[1]d/bin`, // Red Hat + }, ":")+ + `${PATH+:${PATH}}"`, postgresVersion) +} + // reloadCommand returns an entrypoint that convinces PostgreSQL to reload // certificate files when they change. The process will appear as name in `ps` // and `top`. @@ -298,13 +309,24 @@ func reloadCommand( // descriptor gets closed and reopened to use the builtin `[ -nt` to check // mtimes. // - https://unix.stackexchange.com/a/407383 + // + // In the manageAutogrowAnnotation function below, df is used to return the + // relevant volume size in Mebibytes. The 'read' variable gets the value from + // the '1M-blocks' output (second column) and the 'use' variable gets the value + // from the 'Use%' column (fifth column). This value is grabbed after stripping + // out the column headers (before the '\n') and then getting the respective + // value delimited by the white spaces by using the 'read -r' command. + // The underscores (_) discard fields and the variables store them. This allows + // for selective parsing of the provided lines. The percent value is stripped of + // the '%' and then used to determine if a expansion should be triggered by + // setting the calculated volume size using the 'size' variable. script := fmt.Sprintf(` # Parameters for curl when managing autogrow annotation. 
APISERVER="https://kubernetes.default.svc" SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount" -NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) -TOKEN=$(cat ${SERVICEACCOUNT}/token) -CACERT=${SERVICEACCOUNT}/ca.crt +NAMESPACE=$(cat "${SERVICEACCOUNT}/namespace") +TOKEN=$(cat "${SERVICEACCOUNT}/token") +CACERT="${SERVICEACCOUNT}/ca.crt" # Manage autogrow annotation. # Return size in Mebibytes. @@ -313,27 +335,29 @@ manageAutogrowAnnotation() { local trigger=$2 local maxGrow=$3 - size=$(df --block-size=M /"${volume}" | awk 'FNR == 2 {print $2}') - use=$(df /"${volume}" | awk 'FNR == 2 {print $5}') + size=$(df --block-size=M /"${volume}") + read -r _ size _ <<< "${size#*$'\n'}" + use=$(df /"${volume}") + read -r _ _ _ _ use _ <<< "${use#*$'\n'}" sizeInt="${size//M/}" # Use the sed punctuation class, because the shell will not accept the percent sign in an expansion. - useInt=$(echo $use | sed 's/[[:punct:]]//g') + useInt=${use//[[:punct:]]/} triggerExpansion="$((useInt > trigger))" - if [[ $triggerExpansion -eq 1 ]]; then + if [[ ${triggerExpansion} -eq 1 ]]; then newSize="$(((sizeInt / 2)+sizeInt))" # Only compare with maxGrow if it is set (not empty) - if [[ -n "$maxGrow" ]]; then + if [[ -n "${maxGrow}" ]]; then # check to see how much we would normally grow sizeDiff=$((newSize - sizeInt)) # Compare the size difference to the maxGrow; if it is greater, cap it to maxGrow - if [[ $sizeDiff -gt $maxGrow ]]; then + if [[ ${sizeDiff} -gt ${maxGrow} ]]; then newSize=$((sizeInt + maxGrow)) fi fi newSizeMi="${newSize}Mi" - d='[{"op": "add", "path": "/metadata/annotations/suggested-'"${volume}"'-pvc-size", "value": "'"$newSizeMi"'"}]' - curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + d='[{"op": "add", "path": "/metadata/annotations/suggested-'"${volume}"'-pvc-size", "value": "'"${newSizeMi}"'"}]' + curl --cacert "${CACERT}" --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "${d}" fi } @@ -423,8 +447,8 @@ func startupCommand( `(`+shell.MakeDirectories(dataMountPath, naming.PatroniPGDataLogPath)+`) ||`, `halt "$(permissions `+shell.QuoteWord(naming.PatroniPGDataLogPath)+` ||:)"`, - `(`+shell.MakeDirectories(dataMountPath, naming.PGBackRestPGDataLogPath)+`) ||`, - `halt "$(permissions `+shell.QuoteWord(naming.PGBackRestPGDataLogPath)+` ||:)"`, + `(`+shell.MakeDirectories(dataMountPath, util.GetPGBackRestLogPathForInstance(cluster))+`) ||`, + `halt "$(permissions `+shell.QuoteWord(util.GetPGBackRestLogPathForInstance(cluster))+` ||:)"`, ) pg_rewind_override := "" diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index cd4962be79..ffd227f4b8 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -18,9 +18,11 @@ import ( "time" "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -42,7 +44,7 
@@ func TestDataDirectory(t *testing.T) { func TestDataStorage(t *testing.T) { cluster := new(v1beta1.PostgresCluster) - cluster.Spec.PostgresVersion = rand.IntN(20) + cluster.Spec.PostgresVersion = rand.Int32N(20) assert.Equal(t, DataStorage(cluster), "/pgdata") } @@ -256,7 +258,10 @@ func TestBashRecreateDirectory(t *testing.T) { filepath.Join(dir, "d"), "0740") // The assertion below expects alphabetically sorted filenames. // Set an empty environment to always use the default/standard locale. - cmd.Env = []string{} + cmd.Env = []string{ + // Preserve the path to find bash tools (i.e., mktemp) + "PATH=" + os.Getenv("PATH"), + } output, err := cmd.CombinedOutput() assert.NilError(t, err, string(output)) assert.Assert(t, cmp.Regexp(`^`+ @@ -542,6 +547,37 @@ func TestBashSafeLink(t *testing.T) { }) } +func TestShellPath(t *testing.T) { + t.Parallel() + + script := ShellPath(11) + + assert.Assert(t, cmp.Contains(script, `/usr/lib/postgresql/11/bin`)) + assert.Assert(t, cmp.Contains(script, `/usr/libexec/postgresql11`)) + assert.Assert(t, cmp.Contains(script, `/usr/pgsql-11/bin`)) + + t.Run("ShellCheckPOSIX", func(t *testing.T) { + shellcheck := require.ShellCheck(t) + + dir := t.TempDir() + file := filepath.Join(dir, "script.sh") + assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) + + // Expect ShellCheck for "sh" to be happy. + // - https://www.shellcheck.net/wiki/SC2148 + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", "--shell=sh", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) + }) + + t.Run("PrettyYAML", func(t *testing.T) { + b, err := yaml.Marshal(script) + assert.NilError(t, err) + assert.Assert(t, !strings.Contains(string(b), `\n`), "expected literal flow scalar, got:\n%s", b) + assert.Equal(t, 1, strings.Count(string(b), "\n"), "expected one trailing newline, got:\n%s", b) + }) +} + func TestStartupCommand(t *testing.T) { shellcheck := require.ShellCheck(t) t.Parallel() @@ -597,3 +633,44 @@ EOF chmod +x /tmp/pg_rewind_tde.sh`)) }) } + +func TestReloadCommand(t *testing.T) { + shellcheck := require.ShellCheck(t) + + pgdataSize := resource.MustParse("1Gi") + pgwalSize := resource.MustParse("2Gi") + + command := reloadCommand( + "some-name", + &v1beta1.VolumeClaimSpecWithAutoGrow{ + AutoGrow: &v1beta1.AutoGrowSpec{ + Trigger: initialize.Int32(10), + MaxGrow: &pgdataSize, + }, + }, + &v1beta1.VolumeClaimSpecWithAutoGrow{ + AutoGrow: &v1beta1.AutoGrowSpec{ + Trigger: initialize.Int32(20), + MaxGrow: &pgwalSize, + }, + }, + ) + + // Expect a bash command with an inline script. + assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) + assert.Assert(t, len(command) > 3) + + // Write out that inline script. + dir := t.TempDir() + file := filepath.Join(dir, "script.bash") + assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) + + // Expect shellcheck to be happy. 
+ cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) + + assert.Assert(t, cmp.Contains(command[3], "manageAutogrowAnnotation \"pgdata\" \"10\" \"1024\"")) + assert.Assert(t, cmp.Contains(command[3], "manageAutogrowAnnotation \"pgwal\" \"20\" \"2048\"")) + +} diff --git a/internal/postgres/huge_pages.go b/internal/postgres/huge_pages.go index b38120bafd..9dd408ba32 100644 --- a/internal/postgres/huge_pages.go +++ b/internal/postgres/huge_pages.go @@ -16,11 +16,11 @@ import ( // This function looks for a valid huge_pages resource request. If it finds one, // it sets the PostgreSQL parameter "huge_pages" to "try". If it doesn't find // one, it sets "huge_pages" to "off". -func SetHugePages(cluster *v1beta1.PostgresCluster, pgParameters *Parameters) { +func SetHugePages(cluster *v1beta1.PostgresCluster, params *ParameterSet) { if HugePagesRequested(cluster) { - pgParameters.Default.Add("huge_pages", "try") + params.Add("huge_pages", "try") } else { - pgParameters.Default.Add("huge_pages", "off") + params.Add("huge_pages", "off") } } diff --git a/internal/postgres/huge_pages_test.go b/internal/postgres/huge_pages_test.go index 9b9f12172f..69528d568c 100644 --- a/internal/postgres/huge_pages_test.go +++ b/internal/postgres/huge_pages_test.go @@ -27,11 +27,11 @@ func TestSetHugePages(t *testing.T) { }, }} - pgParameters := NewParameters() - SetHugePages(cluster, &pgParameters) + params := NewParameterSet() + SetHugePages(cluster, params) - assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) - assert.Equal(t, pgParameters.Default.Value("huge_pages"), "off") + assert.Equal(t, params.Has("huge_pages"), true) + assert.Equal(t, params.Value("huge_pages"), "off") }) t.Run("hugepages quantity not set", func(t *testing.T) { @@ -48,11 +48,11 @@ func TestSetHugePages(t *testing.T) { }, }} - pgParameters := NewParameters() - SetHugePages(cluster, &pgParameters) + params := NewParameterSet() + SetHugePages(cluster, params) - assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) - assert.Equal(t, pgParameters.Default.Value("huge_pages"), "off") + assert.Equal(t, params.Has("huge_pages"), true) + assert.Equal(t, params.Value("huge_pages"), "off") }) t.Run("hugepages set to zero", func(t *testing.T) { @@ -68,11 +68,11 @@ func TestSetHugePages(t *testing.T) { }, }} - pgParameters := NewParameters() - SetHugePages(cluster, &pgParameters) + params := NewParameterSet() + SetHugePages(cluster, params) - assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) - assert.Equal(t, pgParameters.Default.Value("huge_pages"), "off") + assert.Equal(t, params.Has("huge_pages"), true) + assert.Equal(t, params.Value("huge_pages"), "off") }) t.Run("hugepages set correctly", func(t *testing.T) { @@ -88,11 +88,11 @@ func TestSetHugePages(t *testing.T) { }, }} - pgParameters := NewParameters() - SetHugePages(cluster, &pgParameters) + params := NewParameterSet() + SetHugePages(cluster, params) - assert.Equal(t, pgParameters.Default.Has("huge_pages"), true) - assert.Equal(t, pgParameters.Default.Value("huge_pages"), "try") + assert.Equal(t, params.Has("huge_pages"), true) + assert.Equal(t, params.Value("huge_pages"), "try") }) } diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index a72672824f..73ac1125de 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -173,9 +173,9 @@ containers: # Parameters for curl when managing 
autogrow annotation. APISERVER="https://kubernetes.default.svc" SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount" - NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) - TOKEN=$(cat ${SERVICEACCOUNT}/token) - CACERT=${SERVICEACCOUNT}/ca.crt + NAMESPACE=$(cat "${SERVICEACCOUNT}/namespace") + TOKEN=$(cat "${SERVICEACCOUNT}/token") + CACERT="${SERVICEACCOUNT}/ca.crt" # Manage autogrow annotation. # Return size in Mebibytes. @@ -184,27 +184,29 @@ containers: local trigger=$2 local maxGrow=$3 - size=$(df --block-size=M /"${volume}" | awk 'FNR == 2 {print $2}') - use=$(df /"${volume}" | awk 'FNR == 2 {print $5}') + size=$(df --block-size=M /"${volume}") + read -r _ size _ <<< "${size#*$'\n'}" + use=$(df /"${volume}") + read -r _ _ _ _ use _ <<< "${use#*$'\n'}" sizeInt="${size//M/}" # Use the sed punctuation class, because the shell will not accept the percent sign in an expansion. - useInt=$(echo $use | sed 's/[[:punct:]]//g') + useInt=${use//[[:punct:]]/} triggerExpansion="$((useInt > trigger))" - if [[ $triggerExpansion -eq 1 ]]; then + if [[ ${triggerExpansion} -eq 1 ]]; then newSize="$(((sizeInt / 2)+sizeInt))" # Only compare with maxGrow if it is set (not empty) - if [[ -n "$maxGrow" ]]; then + if [[ -n "${maxGrow}" ]]; then # check to see how much we would normally grow sizeDiff=$((newSize - sizeInt)) # Compare the size difference to the maxGrow; if it is greater, cap it to maxGrow - if [[ $sizeDiff -gt $maxGrow ]]; then + if [[ ${sizeDiff} -gt ${maxGrow} ]]; then newSize=$((sizeInt + maxGrow)) fi fi newSizeMi="${newSize}Mi" - d='[{"op": "add", "path": "/metadata/annotations/suggested-'"${volume}"'-pvc-size", "value": "'"$newSizeMi"'"}]' - curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + d='[{"op": "add", "path": "/metadata/annotations/suggested-'"${volume}"'-pvc-size", "value": "'"${newSizeMi}"'"}]' + curl --cacert "${CACERT}" --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "${d}" fi } diff --git a/internal/postgres/versions.go b/internal/postgres/versions.go index 17d067966d..bf700d9729 100644 --- a/internal/postgres/versions.go +++ b/internal/postgres/versions.go @@ -20,7 +20,7 @@ var finalReleaseDates = map[int]time.Time{ // ReleaseIsFinal returns whether or not t is definitively past the final // scheduled release of a Postgres version. -func ReleaseIsFinal(majorVersion int, t time.Time) bool { - known, ok := finalReleaseDates[majorVersion] +func ReleaseIsFinal[N ~int | ~int32](majorVersion N, t time.Time) bool { + known, ok := finalReleaseDates[int(majorVersion)] return ok && t.After(known) } diff --git a/internal/testing/validation/postgrescluster_test.go b/internal/testing/validation/postgrescluster_test.go deleted file mode 100644 index a4c052ee8f..0000000000 --- a/internal/testing/validation/postgrescluster_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package validation - -import ( - "context" - "testing" - - "gotest.tools/v3/assert" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crunchydata/postgres-operator/internal/testing/require" - v1 "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func TestPostgresUserInterfaceAcrossVersions(t *testing.T) { - ctx := context.Background() - cc := require.Kubernetes(t) - t.Parallel() - - namespace := require.Namespace(t, cc) - - base := v1beta1.NewPostgresCluster() - // Start with a bunch of required fields. - base.Namespace = namespace.Name - base.Name = "postgres-pgadmin" - require.UnmarshalInto(t, &base.Spec, `{ - userInterface: { - pgAdmin: { - dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 1Mi } }, - }, - }, - }, - postgresVersion: 16, - instances: [{ - dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 1Mi } }, - }, - }], - }`) - - v1base := v1.NewPostgresCluster() - // Start with a bunch of required fields. - v1base.Namespace = namespace.Name - v1base.Name = "postgres-pgadmin" - require.UnmarshalInto(t, &v1base.Spec, `{ - userInterface: { - pgAdmin: { - dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 1Mi } }, - }, - }, - }, - postgresVersion: 16, - instances: [{ - dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 1Mi } }, - }, - }], - }`) - - t.Run("v1beta1 is valid with pgadmin", func(t *testing.T) { - assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), - "expected this base cluster to be valid") - }) - t.Run("v1 is invalid with pgadmin", func(t *testing.T) { - assert.ErrorContains(t, cc.Create(ctx, v1base.DeepCopy(), client.DryRunAll), - "userInterface not available in v1") - }) - - t.Run("v1 is valid with pgadmin but only if unchanged from v1beta1", func(t *testing.T) { - // Validation ratcheting is enabled starting in Kubernetes 1.30 - require.KubernetesAtLeast(t, "1.30") - - // A v1 that has been updated from a v1beta1 with no change to the userInterface is valid - assert.NilError(t, cc.Create(ctx, base), - "expected this base cluster to be valid") - v1base.ResourceVersion = base.ResourceVersion - assert.NilError(t, cc.Update(ctx, v1base), - "expected this v1 cluster to be a valid update") - - // But will not be valid if there's a change to the userInterface - require.UnmarshalInto(t, &v1base.Spec, `{ - userInterface: { - pgAdmin: { - dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce, ReadWriteMany], - resources: { requests: { storage: 2Mi } }, - }, - }, - }, - }`) - - assert.ErrorContains(t, cc.Update(ctx, v1base), - "userInterface not available in v1") - }) -} diff --git a/internal/util/pgbackrest.go b/internal/util/pgbackrest.go new file mode 100644 index 0000000000..8452c16b9d --- /dev/null +++ b/internal/util/pgbackrest.go @@ -0,0 +1,26 @@ +// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. 
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package util
+
+import (
+	"path/filepath"
+
+	"github.com/crunchydata/postgres-operator/internal/naming"
+	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
+)
+
+// GetPGBackRestLogPathForInstance determines the appropriate log path for pgbackrest
+// in instance pods. If the user has set a log path via the spec, use that. Otherwise, use
+// the default log path set in the naming package. Ensure trailing slashes are trimmed.
+//
+// This function assumes that the backups/pgbackrest spec is present in postgresCluster.
+func GetPGBackRestLogPathForInstance(postgresCluster *v1beta1.PostgresCluster) string {
+	logPath := naming.PGBackRestPGDataLogPath
+	if postgresCluster.Spec.Backups.PGBackRest.Log != nil &&
+		postgresCluster.Spec.Backups.PGBackRest.Log.Path != "" {
+		logPath = postgresCluster.Spec.Backups.PGBackRest.Log.Path
+	}
+	return filepath.Clean(logPath)
+}
diff --git a/internal/util/pgbackrest_test.go b/internal/util/pgbackrest_test.go
new file mode 100644
index 0000000000..e654436afa
--- /dev/null
+++ b/internal/util/pgbackrest_test.go
@@ -0,0 +1,42 @@
+// Copyright 2021 - 2025 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package util
+
+import (
+	"testing"
+
+	"gotest.tools/v3/assert"
+
+	"github.com/crunchydata/postgres-operator/internal/naming"
+	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
+)
+
+func TestGetPGBackRestLogPathForInstance(t *testing.T) {
+	t.Run("NoSpecPath", func(t *testing.T) {
+		postgrescluster := &v1beta1.PostgresCluster{
+			Spec: v1beta1.PostgresClusterSpec{
+				Backups: v1beta1.Backups{
+					PGBackRest: v1beta1.PGBackRestArchive{},
+				},
+			},
+		}
+		assert.Equal(t, GetPGBackRestLogPathForInstance(postgrescluster), naming.PGBackRestPGDataLogPath)
+	})
+
+	t.Run("SpecPathSet", func(t *testing.T) {
+		postgrescluster := &v1beta1.PostgresCluster{
+			Spec: v1beta1.PostgresClusterSpec{
+				Backups: v1beta1.Backups{
+					PGBackRest: v1beta1.PGBackRestArchive{
+						Log: &v1beta1.LoggingConfiguration{
+							Path: "/volumes/test/log",
+						},
+					},
+				},
+			},
+		}
+		assert.Equal(t, GetPGBackRestLogPathForInstance(postgrescluster), "/volumes/test/log")
+	})
+}
diff --git a/internal/util/volumes.go b/internal/util/volumes.go
index 4151eef76b..9b550ad4ab 100644
--- a/internal/util/volumes.go
+++ b/internal/util/volumes.go
@@ -58,7 +58,13 @@ func addVolumesAndMounts(pod *corev1.PodSpec, volumes []v1beta1.AdditionalVolume
 	missingContainers := []string{}
 
 	for _, spec := range volumes {
-		mount := namer(spec.Name, spec.ReadOnly)
+		// If it is an image volume, override readOnly to true
+		readOnly := spec.ReadOnly
+		if spec.Image != nil {
+			readOnly = true
+		}
+
+		mount := namer(spec.Name, readOnly)
 		pod.Volumes = append(pod.Volumes, spec.AsVolume(mount.Name))
 
 		// Create a set of all the requested containers,
diff --git a/internal/util/volumes_test.go b/internal/util/volumes_test.go
index ee5ebaff9e..ff2d0e7622 100644
--- a/internal/util/volumes_test.go
+++ b/internal/util/volumes_test.go
@@ -207,6 +207,60 @@ func TestAddAdditionalVolumesAndMounts(t *testing.T) {
 			claimName: required
 			readOnly: true`,
 		expectedMissing: []string{},
+	}, {
+		tcName: "image volumes - readOnly overridden true",
+		additionalVolumes: []v1beta1.AdditionalVolume{{
+			Containers: []string{"database"},
+			Image: &corev1.ImageVolumeSource{
+				Reference: "some-image-name",
+				PullPolicy: corev1.PullAlways,
+			},
+ Name: "required", + ReadOnly: true, + }, { + Image: &corev1.ImageVolumeSource{ + Reference: "another-image-name", + PullPolicy: corev1.PullAlways, + }, + Name: "other", + ReadOnly: false, + }}, + expectedContainers: `- name: database + resources: {} + volumeMounts: + - mountPath: /volumes/required + name: volumes-required + readOnly: true + - mountPath: /volumes/other + name: volumes-other + readOnly: true +- name: other + resources: {} + volumeMounts: + - mountPath: /volumes/other + name: volumes-other + readOnly: true`, + expectedInitContainers: `- name: startup + resources: {} + volumeMounts: + - mountPath: /volumes/other + name: volumes-other + readOnly: true +- name: config + resources: {} + volumeMounts: + - mountPath: /volumes/other + name: volumes-other + readOnly: true`, + expectedVolumes: `- image: + pullPolicy: Always + reference: some-image-name + name: volumes-required +- image: + pullPolicy: Always + reference: another-image-name + name: volumes-other`, + expectedMissing: []string{}, }} for _, tc := range testCases { diff --git a/licenses/.gitignore b/licenses/.gitignore deleted file mode 100644 index 72e8ffc0db..0000000000 --- a/licenses/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/licenses/LICENSE.txt b/licenses/LICENSE.txt index e799dc3209..57f1773676 100644 --- a/licenses/LICENSE.txt +++ b/licenses/LICENSE.txt @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2017 - 2024 Crunchy Data Solutions, Inc. + Copyright 2017 - 2025 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/apis/postgres-operator.crunchydata.com/README.md b/pkg/apis/postgres-operator.crunchydata.com/README.md new file mode 100644 index 0000000000..ef314de19b --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/README.md @@ -0,0 +1,121 @@ + + +# Custom Resource Definitions + +These directories contain Go types that serve as [DTO]s for communicating with the [Kubernetes API]. +We use [controller-gen] to produce [CRD]s based on these Go types with [schemas](validation.md) that match. + +This [directory](.) contains our API Group, `postgres-operator.crunchydata.com`, and each subdirectory is a version: + +- v1beta1 is compatible with Kubernetes 1.30, OpenShift 4.14, and later +- v1 uses newer CRD features and requires Kubernetes 1.30, OpenShift 4.17, and later + +``` +pkg/apis/postgres-operator.crunchydata.com +├── v1 +└── v1beta1 +``` + +[controller-gen]: https://book.kubebuilder.io/reference/controller-gen +[CRD]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions +[DTO]: https://martinfowler.com/eaaCatalog/dataTransferObject.html +[Kubernetes API]: https://docs.k8s.io/concepts/overview/kubernetes-api + + +# CRD Versions + +Kubernetes organizes API resources into Groups. Each resource is represented by a Kind that can have multiple Versions. The shape of a CRD reflects this: + +```yaml +kind: CustomResourceDefinition +metadata: + name: "ideas.example.com" # {spec.plural}.{spec.group} +spec: + group: "example.com" # one group (G) + names: + kind: Idea # one kind (K) + plural: ideas # one resource (R) + singular: idea # one resource (R) + versions: # many versions (V) + - name: v1beta1 + schema: … + - name: v1 + schema: … +``` + + + +Every Kubernetes API request includes the Group, Resource, Version, and Kind of its payload and expected response. 
+The version affects how Kubernetes handles the request, but it does *not* affect how Kubernetes stores the result.
+Every Kubernetes [object] is stored according to its Group, Resource, Namespace, and Name.
+
+> [!NOTE]
+> - The API request URL contains the Group + Version + Resource (GVR).
+> - The API request body includes the Group + Version (GV) as [`apiVersion`] and Kind (K) as `kind`.
+> - [RBAC] matches on the Group + Resource (GR) of an API request.
+> - The etcd key of each object contains the Group + Resource (GR), Namespace and Name.
+
+This allows a variety of clients to concurrently use whichever API versions they understand.
+Kubernetes converts what is stored to or from the version in the API request.
+This means, however, that *every* version of a resource **must** be equivalent to *every other* version.
+
+Each CRD indicates which versions Kubernetes should accept from clients with `served=true`.
+Kubernetes stores custom resource objects in the *single* version indicated with `storage=true`.
+
+> [!IMPORTANT]
+> We use the `None` conversion strategy and [validation ratcheting](validation.md#validation-ratcheting)...
+
+[`apiVersion`]: https://docs.k8s.io/reference/using-api#api-groups
+[object]: https://docs.k8s.io/concepts/overview/working-with-objects
+[RBAC]: https://docs.k8s.io/reference/kubernetes-api/authorization-resources/role-v1
+
+
diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go
new file mode 100644
index 0000000000..77076a5de3
--- /dev/null
+++ b/pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go
@@ -0,0 +1,18 @@
+// Copyright 2021 - 2025 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package v1
+
+import (
+	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
+)
+
+// PGBackRestArchive defines a pgBackRest archive configuration
+// +kubebuilder:validation:XValidation:rule=`!self.?log.path.hasValue() || self.log.path.startsWith("/volumes/")`,message=`pgbackrest sidecar log path is restricted to an existing additional volume`
+// +kubebuilder:validation:XValidation:rule=`!self.?repoHost.log.path.hasValue() || self.repoHost.volumes.additional.exists(x, self.repoHost.log.path.startsWith("/volumes/"+x.name))`,message=`repo host log path is restricted to an existing additional volume`
+// +kubebuilder:validation:XValidation:rule=`!self.?jobs.log.path.hasValue() || self.jobs.volumes.additional.exists(x, self.jobs.log.path.startsWith("/volumes/"+x.name))`,message=`backup jobs log path is restricted to an existing additional volume`
+// +kubebuilder:validation:XValidation:rule=`!self.?global["log-path"].hasValue()`,message=`pgbackrest log-path must be set via the various log.path fields in the spec`
+type PGBackRestArchive struct {
+	v1beta1.PGBackRestArchive `json:",inline"`
+}
diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go
index 2c3b9c4112..a8aaa59363 100644
--- a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go
+++ b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go
@@ -21,11 +21,11 @@ import (
 //
 // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need "volumes.temp" to log in "/pgtmp"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string ||
!v.startsWith("/pgtmp/logs/postgres") || self.instances.all(i, i.?volumes.temp.hasValue())).orValue(true)` // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need "walVolumeClaimSpec" to log in "/pgwal"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/pgwal/logs/postgres") || self.instances.all(i, i.?walVolumeClaimSpec.hasValue())).orValue(true)` +// +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need an additional volume to log in "/volumes"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true)` // -// +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need an additional volume to log in "/volumes"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue())).orValue(true)` +// # pgBackRest Logging // -// TODO: Also check the above path against volume names: `i.?volumes.additional.hasValue() && i.volumes.additional.exists(directory.startsWith("/volumes/" + volume.name))` -// https://github.com/kubernetes-sigs/controller-tools/pull/1270#issuecomment-3272211184 +// +kubebuilder:validation:XValidation:fieldPath=`.backups.pgbackrest.log.path`,message=`all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`,rule=`self.?backups.pgbackrest.log.path.optMap(v, !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true)` type PostgresClusterSpec struct { // +optional Metadata *v1beta1.Metadata `json:"metadata,omitempty"` @@ -72,6 +72,7 @@ type PostgresClusterSpec struct { // namespace as the cluster. // +optional DatabaseInitSQL *DatabaseInitSQL `json:"databaseInitSQL,omitempty"` + // Whether or not the PostgreSQL cluster should use the defined default // scheduling constraints. If the field is unset or false, the default // scheduling constraints will be used in addition to any custom constraints @@ -93,11 +94,6 @@ type PostgresClusterSpec struct { // pull (download) container images. // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // +kubebuilder:validation:Type=string - // // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} // +optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` @@ -110,9 +106,11 @@ type PostgresClusterSpec struct { // Specifies one or more sets of PostgreSQL pods that replicate data for // this cluster. 
+ // --- // +listType=map // +listMapKey=name // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 // +operator-sdk:csv:customresourcedefinitions:type=spec,order=2 InstanceSets []PostgresInstanceSetSpec `json:"instances"` @@ -144,9 +142,9 @@ type PostgresClusterSpec struct { // The major version of PostgreSQL installed in the PostgreSQL image // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=11 - // +kubebuilder:validation:Maximum=17 + // +kubebuilder:validation:Maximum=18 // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 - PostgresVersion int `json:"postgresVersion"` + PostgresVersion int32 `json:"postgresVersion"` // The PostGIS extension version installed in the PostgreSQL image. // When image is not set, indicates a PostGIS enabled image will be used. @@ -358,7 +356,7 @@ type Backups struct { // pgBackRest archive configuration // +optional - PGBackRest v1beta1.PGBackRestArchive `json:"pgbackrest"` + PGBackRest PGBackRestArchive `json:"pgbackrest"` // VolumeSnapshot configuration // +optional @@ -393,7 +391,7 @@ type PostgresClusterStatus struct { // Stores the current PostgreSQL major version following a successful // major PostgreSQL upgrade. // +optional - PostgresVersion int `json:"postgresVersion"` + PostgresVersion int32 `json:"postgresVersion"` // Current state of the PostgreSQL proxy. // +optional @@ -540,6 +538,8 @@ type PostgresInstanceSetSpec struct { // +optional TablespaceVolumes []TablespaceVolume `json:"tablespaceVolumes,omitempty"` + // Volumes to be added to the instance set. + // +optional Volumes *v1beta1.PostgresVolumesSpec `json:"volumes,omitempty"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go index 80043ab766..46d1817070 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go @@ -182,6 +182,22 @@ func (in *MonitoringStatus) DeepCopy() *MonitoringStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { + *out = *in + in.PGBackRestArchive.DeepCopyInto(&out.PGBackRestArchive) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGBackRestArchive. +func (in *PGBackRestArchive) DeepCopy() *PGBackRestArchive { + if in == nil { + return nil + } + out := new(PGBackRestArchive) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PGBouncerPodSpec) DeepCopyInto(out *PGBouncerPodSpec) { *out = *in diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go index 89b464a248..c8606d6e81 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go @@ -52,10 +52,6 @@ type CrunchyBridgeClusterSpec struct { // The cloud provider where the cluster is located. // Currently Bridge offers aws, azure, and gcp only // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. 
- // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=10 - // // +kubebuilder:validation:Required // +kubebuilder:validation:Enum={aws,azure,gcp} // +kubebuilder:validation:XValidation:rule=`self == oldSelf`,message="immutable" diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go index 5ab1b2792c..3d4d9bda50 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go @@ -68,10 +68,6 @@ type PatroniLogConfig struct { // The Patroni log level. // More info: https://docs.python.org/3/library/logging.html#levels // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=10 - // // +default="INFO" // +kubebuilder:validation:Enum={CRITICAL,ERROR,WARNING,INFO,DEBUG,NOTSET} // +optional @@ -96,10 +92,6 @@ type PatroniSwitchover struct { // factors. A TargetInstance must be specified to failover. // NOTE: The Failover type is reserved as the "last resort" case. // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +kubebuilder:validation:Enum={Switchover,Failover} // +kubebuilder:default:=Switchover // +optional diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go index d9777bdcd5..0f87676a72 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go @@ -89,7 +89,7 @@ type PGBackRestArchive struct { // +optional Metadata *Metadata `json:"metadata,omitempty"` - // Projected volumes containing custom pgBackRest configuration. These files are mounted + // Projected volumes containing custom pgBackRest configuration. These files are mounted // under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the // PostgreSQL Operator: // https://pgbackrest.org/configuration.html @@ -113,6 +113,10 @@ type PGBackRestArchive struct { // +optional Jobs *BackupJobs `json:"jobs,omitempty"` + // Logging configuration for pgbackrest processes running in postgres instance pods. + // +optional + Log *LoggingConfiguration `json:"log,omitempty"` + // Defines a pgBackRest repository // +kubebuilder:validation:MinItems=1 // +listType=map @@ -155,6 +159,10 @@ type BackupJobs struct { // +optional Resources corev1.ResourceRequirements `json:"resources,omitzero"` + // Logging configuration for pgbackrest processes running in Backup Job Pods. + // +optional + Log *LoggingConfiguration `json:"log,omitempty"` + // Priority class name for the pgBackRest backup Job pods. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ // +optional @@ -215,6 +223,10 @@ type PGBackRestRepoHost struct { // +optional Affinity *corev1.Affinity `json:"affinity,omitempty"` + // Logging configuration for pgbackrest processes running in the repo host pod. + // +optional + Log *LoggingConfiguration `json:"log,omitempty"` + // Priority class name for the pgBackRest repo host pod. 
Changing this value // causes PostgreSQL to restart. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go index 805ce1a16d..56138a61fa 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go @@ -29,11 +29,6 @@ type PGUpgradeSpec struct { // pull (download) container images. // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // +kubebuilder:validation:Type=string - // // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} // +optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` @@ -84,7 +79,7 @@ type PGUpgradeSettings struct { // The major version of PostgreSQL before the upgrade. // --- // +kubebuilder:validation:Minimum=11 - // +kubebuilder:validation:Maximum=17 + // +kubebuilder:validation:Maximum=18 // +required FromPostgresVersion int32 `json:"fromPostgresVersion"` @@ -98,7 +93,7 @@ type PGUpgradeSettings struct { // The major version of PostgreSQL to be upgraded to. // --- // +kubebuilder:validation:Minimum=11 - // +kubebuilder:validation:Maximum=17 + // +kubebuilder:validation:Maximum=18 // +required ToPostgresVersion int32 `json:"toPostgresVersion"` @@ -110,10 +105,6 @@ type PGUpgradeSettings struct { // - Clone since 12: https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/pg_upgrade/pg_upgrade.h;hb=REL_12_0#l232 // - CopyFileRange since 17: https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/pg_upgrade/pg_upgrade.h;hb=REL_17_0#l251 // - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +kubebuilder:validation:Enum={Clone,Copy,CopyFileRange,Link} // +optional TransferMethod string `json:"transferMethod,omitempty"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go index 06658065b6..2880c565e0 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go @@ -175,10 +175,6 @@ type PostgresPasswordSpec struct { // "ASCII" passwords contain letters, numbers, and symbols from the US-ASCII character set. // "AlphaNumeric" passwords contain letters and numbers from the US-ASCII character set. // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. 
- // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +kubebuilder:default=ASCII // +kubebuilder:validation:Enum={ASCII,AlphaNumeric} // +required diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 4374fa5e4e..ed539341d7 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -59,6 +59,7 @@ type PostgresClusterSpec struct { // namespace as the cluster. // +optional DatabaseInitSQL *DatabaseInitSQL `json:"databaseInitSQL,omitempty"` + // Whether or not the PostgreSQL cluster should use the defined default // scheduling constraints. If the field is unset or false, the default // scheduling constraints will be used in addition to any custom constraints @@ -72,6 +73,9 @@ type PostgresClusterSpec struct { // e.g. RELATED_IMAGE_POSTGRES_13. For PostGIS enabled PostgreSQL images, // the format is RELATED_IMAGE_POSTGRES_{postgresVersion}_GIS_{postGISVersion}, // e.g. RELATED_IMAGE_POSTGRES_13_GIS_3.1. + // --- + // [corev1.Container.Image] + // // +optional // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 Image string `json:"image,omitempty"` @@ -80,11 +84,6 @@ type PostgresClusterSpec struct { // pull (download) container images. // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // +kubebuilder:validation:Type=string - // // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} // +optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` @@ -131,9 +130,9 @@ type PostgresClusterSpec struct { // The major version of PostgreSQL installed in the PostgreSQL image // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=11 - // +kubebuilder:validation:Maximum=17 + // +kubebuilder:validation:Maximum=18 // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 - PostgresVersion int `json:"postgresVersion"` + PostgresVersion int32 `json:"postgresVersion"` // The PostGIS extension version installed in the PostgreSQL image. // When image is not set, indicates a PostGIS enabled image will be used. @@ -379,7 +378,7 @@ type PostgresClusterStatus struct { // Stores the current PostgreSQL major version following a successful // major PostgreSQL upgrade. // +optional - PostgresVersion int `json:"postgresVersion"` + PostgresVersion int32 `json:"postgresVersion"` // Current state of the PostgreSQL proxy. // +optional @@ -526,6 +525,8 @@ type PostgresInstanceSetSpec struct { // +optional TablespaceVolumes []TablespaceVolume `json:"tablespaceVolumes,omitempty"` + // Volumes to be added to the instance set. 
+ // +optional Volumes *PostgresVolumesSpec `json:"volumes,omitempty"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index 4f276a8d07..b276213f6b 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -244,10 +244,6 @@ type ServiceSpec struct { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +optional // +kubebuilder:default=ClusterIP // +kubebuilder:validation:Enum={ClusterIP,NodePort,LoadBalancer} @@ -265,11 +261,6 @@ type ServiceSpec struct { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=10 - // +kubebuilder:validation:Type=string - // // +optional // +kubebuilder:validation:Enum={Cluster,Local} InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"` @@ -277,10 +268,6 @@ type ServiceSpec struct { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-policies // --- // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=10 - // +kubebuilder:validation:Type=string - // // +optional // +kubebuilder:validation:Enum={Cluster,Local} ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` @@ -324,14 +311,17 @@ func (meta *Metadata) GetAnnotationsOrNil() map[string]string { // Only one applier should be managing each volume definition. // https://docs.k8s.io/reference/using-api/server-side-apply#merge-strategy // +structType=atomic +// +// +kubebuilder:validation:XValidation:rule=`has(self.claimName) != has(self.image)`,message=`you must set only one of image or claimName` +// +kubebuilder:validation:XValidation:rule=`!has(self.image) || !has(self.readOnly) || self.readOnly`,message=`image volumes must be readOnly` type AdditionalVolume struct { // Name of an existing PersistentVolumeClaim. // --- // https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/core/validation#ValidatePersistentVolumeClaim // https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/core/validation#ValidatePersistentVolumeName // - // +required - ClaimName DNS1123Subdomain `json:"claimName"` + // +optional + ClaimName DNS1123Subdomain `json:"claimName,omitempty"` // The names of containers in which to mount this volume. // The default mounts the volume in *all* containers. An empty list does not mount the volume to any containers. @@ -346,6 +336,15 @@ type AdditionalVolume struct { // +optional Containers []DNS1123Label `json:"containers"` + // Reference to an image or OCI artifact. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#image + // --- + // Use "title" to add more validation in [internal/crd/post-process.jq]. 
+ // +kubebuilder:title=$corev1.ImageVolumeSource + // + // +optional + Image *corev1.ImageVolumeSource `json:"image,omitempty"` + // The name of the directory in which to mount this volume. // Volumes are mounted in containers at `/volumes/{name}`. // --- @@ -379,7 +378,16 @@ func (in *AdditionalVolume) AsVolume(name string) corev1.Volume { ClaimName: in.ClaimName, ReadOnly: in.ReadOnly, } + case in.Image != nil: + out.Image = in.Image.DeepCopy() } return out } + +// LoggingConfiguration provides logging configuration for various components +type LoggingConfiguration struct { + // +kubebuilder:validation:MaxLength=256 + // +optional + Path string `json:"path,omitempty"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go index 1185321fe9..8a1f22be2d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go @@ -45,6 +45,25 @@ func TestAdditionalVolumeAsVolume(t *testing.T) { assert.DeepEqual(t, out, expected) }) }) + + t.Run("Image", func(t *testing.T) { + in := v1beta1.AdditionalVolume{Image: &corev1.ImageVolumeSource{ + Reference: "jkl;", + PullPolicy: corev1.PullAlways, + }} + out := in.AsVolume("asdf") + + var expected corev1.Volume + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: asdf, + image: { + reference: jkl;, + pullPolicy: Always, + }, + }`), &expected)) + + assert.DeepEqual(t, out, expected) + }) } func TestDurationAsDuration(t *testing.T) { diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go index eacf54e365..e1147eb3df 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go @@ -105,11 +105,6 @@ type PGAdminSpec struct { // pull (download) container images. // More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // +kubebuilder:validation:Type=string - // // +kubebuilder:validation:Enum={Always,Never,IfNotPresent} // +optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` @@ -207,10 +202,6 @@ type PGAdminUser struct { // Role determines whether the user has admin privileges or not. // Defaults to User. Valid options are Administrator and User. // --- - // Kubernetes assumes the evaluation cost of an enum value is very large. - // TODO(k8s-1.29): Drop MaxLength after Kubernetes 1.29; https://issue.k8s.io/119511 - // +kubebuilder:validation:MaxLength=15 - // // +kubebuilder:validation:Enum={Administrator,User} // +optional Role string `json:"role,omitempty"` @@ -240,6 +231,10 @@ type PGAdminStatus struct { // +optional MajorVersion int `json:"majorVersion,omitempty"` + // MinorVersion represents the minor version of the running pgAdmin. + // +optional + MinorVersion string `json:"minorVersion,omitempty"` + // observedGeneration represents the .metadata.generation on which the status was based. 
// +optional // +kubebuilder:validation:Minimum=0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index 8843869827..2d1301c2df 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -41,6 +41,11 @@ func (in *AdditionalVolume) DeepCopyInto(out *AdditionalVolume) { *out = make([]DNS1123Label, len(*in)) copy(*out, *in) } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(corev1.ImageVolumeSource) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolume. @@ -67,6 +72,11 @@ func (in *AutoGrowSpec) DeepCopy() *AutoGrowSpec { func (in *BackupJobs) DeepCopyInto(out *BackupJobs) { *out = *in in.Resources.DeepCopyInto(&out.Resources) + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(LoggingConfiguration) + **out = **in + } if in.PriorityClassName != nil { in, out := &in.PriorityClassName, &out.PriorityClassName *out = new(string) @@ -658,6 +668,21 @@ func (in *InstrumentationSpec) DeepCopy() *InstrumentationSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfiguration) DeepCopyInto(out *LoggingConfiguration) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfiguration. +func (in *LoggingConfiguration) DeepCopy() *LoggingConfiguration { + if in == nil { + return nil + } + out := new(LoggingConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metadata) DeepCopyInto(out *Metadata) { *out = *in @@ -1156,6 +1181,11 @@ func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { *out = new(BackupJobs) (*in).DeepCopyInto(*out) } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(LoggingConfiguration) + **out = **in + } if in.Repos != nil { in, out := &in.Repos, &out.Repos *out = make([]PGBackRestRepo, len(*in)) @@ -1374,6 +1404,11 @@ func (in *PGBackRestRepoHost) DeepCopyInto(out *PGBackRestRepoHost) { *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(LoggingConfiguration) + **out = **in + } if in.PriorityClassName != nil { in, out := &in.PriorityClassName, &out.PriorityClassName *out = new(string) diff --git a/pkg/apis/postgres-operator.crunchydata.com/validation.md b/pkg/apis/postgres-operator.crunchydata.com/validation.md index 49a243d4c0..92b9fa11bd 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/validation.md +++ b/pkg/apis/postgres-operator.crunchydata.com/validation.md @@ -4,10 +4,9 @@ # SPDX-License-Identifier: Apache-2.0 --> -# Custom Resource Definitions +# Custom Resource Definition Schemas -These directories contain Go types that serve as [DTO]s for communicating with the Kubernetes API. -We use the [controller-gen] tool to produce [CRD]s with schemas that match the Go types. +These directories contain Go types that [controller-gen] uses to generate matching [CRD] schemas. The CRD schema tells Kubernetes what fields and values are allowed in our API objects and how to handle changes to values. 
> [!TIP] @@ -15,7 +14,7 @@ The CRD schema tells Kubernetes what fields and values are allowed in our API ob CRD schemas are modified OpenAPI 3.0 [validation] schemas. Much of the schema defines what fields, types, and values are *allowed*. -`controller-gen` considers the [Go type] of a field and its [validation markers] for this. +`controller-gen` considers the field's [Go type] and [validation markers] for this. Kubernetes uses its own algorithm to consider and accept changes to API objects: [Server-Side Apply], SSA. CRD schemas contain non-standard attributes that affect SSA. @@ -25,9 +24,6 @@ CRD schemas contain non-standard attributes that affect SSA. [controller-gen]: https://book.kubebuilder.io/reference/controller-gen [CRD]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions -[DTO]: https://martinfowler.com/eaaCatalog/dataTransferObject.html -[Go type]: https://go.dev/ref/spec#Types -[Kubernetes API]: https://docs.k8s.io/concepts/overview/kubernetes-api [processing markers]: https://book.kubebuilder.io/reference/markers/crd-processing [Server-Side Apply]: https://docs.k8s.io/reference/using-api/server-side-apply [validation]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions#validation @@ -92,7 +88,7 @@ The `additionalProperties` property indicates that the keys are unknown; these f # CEL Rules > [!IMPORTANT] -> When possible, use [OpenAPI properties](#FIXME) rather than CEL rules. +> When possible, use [OpenAPI properties](#openapi-properties) rather than CEL rules. > The former do not affect the CRD [validation budget](#FIXME). ## Optional field syntax @@ -109,3 +105,88 @@ likewise be considered optional. The optional field syntax is only available in K8s 1.29+. [optional field marker]: https://pkg.go.dev/github.com/google/cel-go/cel#hdr-Syntax_Changes-OptionalTypes. + +## CEL Availability + +Kubernetes' capabilities with CEL are continuously expanding. +Different versions of Kubernetes have different CEL functions, syntax, and features. + +```asciidoc +:controller-tools: https://github.com/kubernetes-sigs/controller-tools/releases + +[cols=",,", options="header"] +|=== +| Kubernetes | OpenShift | `controller-gen` + +| 1.25 Beta, `CustomResourceValidationExpressions` gate +| OCP 4.12 +| link:{controller-tools}/v0.9.0[v0.9.0] has `rule` and `message` fields on the `XValidation` marker + +| 1.27 adds `messageExpression` +| OCP 4.14 +| link:{controller-tools}/v0.15.0[v0.15.0] adds `messageExpression` field to the `XValidation` marker + +| 1.28 adds `reason` and `fieldPath` +| OCP 4.15 +| link:{controller-tools}/v0.16.0[v0.16.0] adds `reason` and `fieldPath` to the `XValidation` marker + +| 1.29 GA | OCP 4.16 | + +| 1.30 enables link:#validation-ratcheting[validation ratcheting]; link:https://pr.k8s.io/123475[fixes fieldPath]… +| OCP 4.17 +| link:{controller-tools}/v0.17.3[v0.17.3] adds `optionalOldSelf` to the `XValidation` marker + +| 1.34 link:https://pr.k8s.io/132837[fixes IntOrString cost] +| ? +| link:{controller-tools}/v0.18.0[v0.18.0] allows validation on IntOrString + +| 1.35 link:https://pr.k8s.io/132798[shows values when validation fails] +| ? +| n/a + +|=== +``` + + + +Some details are missing from the Go package documentation: https://pr.k8s.io/130660 + +| CEL [libraries](https://code.k8s.io/staging/src/k8s.io/apiserver/pkg/cel/library), extensions, etc. 
| Kubernetes | OpenShift | +| --- | --- | --- | +| kubernetes.authz | 1.28 | +| kubernetes.authzSelectors | 1.32 | +| kubernetes.format | 1.32 | [4.18](https://github.com/openshift/kubernetes/pull/2140) | +| kubernetes.lists | 1.24 | 4.12 | +| kubernetes.net.cidr | 1.31 | [4.16](https://github.com/openshift/kubernetes/pull/1828) | +| kubernetes.net.ip | 1.31 | [4.16](https://github.com/openshift/kubernetes/pull/1828) | +| kubernetes.quantity | 1.29 | 4.16 | +| kubernetes.regex | 1.24 | 4.12 | +| kubernetes.urls | 1.24 | 4.12 | +| [cross-type numeric comparison](https://pkg.go.dev/github.com/google/cel-go/cel#CrossTypeNumericComparisons) | 1.29 | 4.16 | +| [optional types](https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes) | 1.29 | 4.16 | +| [strings](https://pkg.go.dev/github.com/google/cel-go/ext#Strings) v0 | 1.24 | 4.12 | +| [strings](https://pkg.go.dev/github.com/google/cel-go/ext#Strings) v2 | 1.30 | 4.17 | +| [sets](https://pkg.go.dev/github.com/google/cel-go/ext#Sets) | 1.30 | 4.17 | +| [two-variable comprehension](https://pkg.go.dev/github.com/google/cel-go/ext#TwoVarComprehensions) | 1.33 | + + +# Validation Ratcheting + +> **Feature Gate:** `CRDValidationRatcheting` +> +> Enabled in Kubernetes 1.30 and GA in 1.33 (OpenShift 4.17 and ~4.20) + +[Validation ratcheting] allows update operations to succeed when unchanged fields are invalid. +This allows CRDs to add or "tighten" validation without breaking existing CR objects. + +Some schema changes are not ratcheted: + +- OpenAPI `allOf`, `oneOf`, `anyOf`, `not`; values in fields with these must be valid +- OpenAPI `required`; required fields are always required +- Removing `additionalProperties`; undefined fields are always dropped +- Adding or removing fields (names) in `properties`; undefined fields are dropped, and values in new fields must be valid +- Changes to `x-kubernetes-list-type` or `x-kubernetes-list-map-keys`; values in these fields must be valid +- Rules containing `oldSelf`; these are [transition rules] and should do their own ratcheting + +[transition rules]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions#transition-rules +[Validation ratcheting]: https://docs.k8s.io/tasks/extend-kubernetes/custom-resources/custom-resource-definitions#validation-ratcheting diff --git a/testing/chainsaw/e2e/values.yaml b/testing/chainsaw/e2e/values.yaml index 0c8a3ce580..152354e5dc 100644 --- a/testing/chainsaw/e2e/values.yaml +++ b/testing/chainsaw/e2e/values.yaml @@ -2,4 +2,4 @@ versions: postgres: '17' images: - psql: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.5-2520' + psql: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.6-2534' diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml index 244533b7ee..0290339143 100644 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml @@ -6,12 +6,14 @@ commands: pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + # /usr/local/lib/python3.11/site-packages/pgadmin4 allows for various Python versions to be 
referenced in testing users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + bob_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="dave@example.com") | .role') - [ $bob_role = 1 ] && [ $dave_role = 2 ] || exit 1 + # Prior to pgAdmin 9.3, the role values were integers rather than strings. This supports both variations. + ( [ $bob_role = 1 ] && [ $dave_role = 2 ] ) || ( [ $bob_role = "Administrator" ] && [ $dave_role = "User" ] ) || exit 1 users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml index 01aff25b3b..00c3d819fd 100644 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml @@ -6,13 +6,15 @@ commands: pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + # /usr/local/lib/python3.11/site-packages/pgadmin4 allows for various Python versions to be referenced in testing users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') - jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + bob_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="jimi@example.com") | .role') - [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + # Prior to pgAdmin 9.3, the role values were integers rather than strings. This supports both variations. 
+ ( [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] ) || ( [ $bob_role = "Administrator" ] && [ $dave_role = "Administrator" ] && [ $jimi_role = "User" ] ) || exit 1 users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml index 1dca13a7b7..f6eb83b2d9 100644 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml @@ -6,13 +6,15 @@ commands: pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + # /usr/local/lib/python3.11/site-packages/pgadmin4 allows for various Python versions to be referenced in testing users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') - jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + bob_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="jimi@example.com") | .role') - [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + # Prior to pgAdmin 9.3, the role values were integers rather than strings. This supports both variations. 
+ ( [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] ) || ( [ $bob_role = "Administrator" ] && [ $dave_role = "Administrator" ] && [ $jimi_role = "User" ] ) || exit 1 users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml index 5c0e7267e6..3e3d8396b3 100644 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml @@ -6,13 +6,15 @@ commands: pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + # /usr/local/lib/python3.11/site-packages/pgadmin4 allows for various Python versions to be referenced in testing users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') - jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + bob_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq -r '.[] | select(.username=="jimi@example.com") | .role') - [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + # Prior to pgAdmin 9.3, the role values were integers rather than strings. This supports both variations. + ( [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] ) || ( [ $bob_role = "Administrator" ] && [ $dave_role = "Administrator" ] && [ $jimi_role = "User" ] ) || exit 1 users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d)