diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..639a059edc --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,16 @@ +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/customizing-dependency-updates +# +# See: https://www.github.com/dependabot/dependabot-core/issues/4605 +--- +# yaml-language-server: $schema=https://json.schemastore.org/dependabot-2.0.json +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly + day: tuesday + groups: + all-github-actions: + patterns: ['*'] diff --git a/.github/workflows/codeql-analysis.yaml b/.github/workflows/codeql-analysis.yaml index a310f3eeed..ae4d24d122 100644 --- a/.github/workflows/codeql-analysis.yaml +++ b/.github/workflows/codeql-analysis.yaml @@ -1,15 +1,20 @@ name: CodeQL on: + pull_request: push: branches: - - master + - main schedule: - cron: '10 18 * * 2' +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local + jobs: analyze: - name: Analyze runs-on: ubuntu-latest permissions: actions: read diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 193f05698a..c715f2a1d7 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -2,22 +2,27 @@ name: Linters on: pull_request: - branches: - - master + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local jobs: golangci-lint: runs-on: ubuntu-latest + permissions: + contents: read + checks: write steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: { go-version: stable } - - uses: golangci/golangci-lint-action@v4 + - uses: golangci/golangci-lint-action@v6 with: version: latest args: --timeout=5m - skip-cache: true # https://github.com/golangci/golangci-lint-action/issues/863 # Count issues reported by disabled linters. The command always # exits zero to ensure it does not fail the pull request check. diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 846616b74d..e8174e4f95 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -2,19 +2,22 @@ name: Tests on: pull_request: - branches: - - master push: branches: - - master + - main + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local jobs: go-test: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 - with: { go-version: 1.21 } + with: { go-version: stable } - run: make check - run: make check-generate @@ -22,7 +25,7 @@ jobs: run: go mod tidy && git diff --exit-code -- go.mod kubernetes-api: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [go-test] strategy: fail-fast: false @@ -36,7 +39,6 @@ jobs: - run: ENVTEST_K8S_VERSION="${KUBERNETES#default}" make check-envtest env: KUBERNETES: "${{ matrix.kubernetes }}" - GOEXPERIMENT: nocoverageredesign # https://go.dev/issue/65653 GO_TEST: go test --coverprofile 'envtest.coverage' --coverpkg ./internal/... 
# Upload coverage to GitHub @@ -49,12 +51,12 @@ jobs: kubernetes-k3d: if: "${{ github.repository == 'CrunchyData/postgres-operator' }}" - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest needs: [go-test] strategy: fail-fast: false matrix: - kubernetes: [v1.30, v1.25] + kubernetes: [v1.31, v1.28] steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 @@ -65,14 +67,13 @@ jobs: with: k3s-channel: "${{ matrix.kubernetes }}" prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-0 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-0 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.7-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2 - run: make createnamespaces check-envtest-existing env: PGO_TEST_TIMEOUT_SCALE: 1.2 - GOEXPERIMENT: nocoverageredesign # https://go.dev/issue/65653 GO_TEST: go test --coverprofile 'envtest-existing.coverage' --coverpkg ./internal/... # Upload coverage to GitHub @@ -89,7 +90,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes: [v1.29, v1.28, v1.27, v1.26, v1.25] + kubernetes: [v1.31, v1.30, v1.29, v1.28] steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 @@ -100,16 +101,16 @@ jobs: with: k3s-channel: "${{ matrix.kubernetes }}" prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-25 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-0 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-31 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0 registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.7-0 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.7-3.3-0 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-0 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.3-0 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.4-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.3-2 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.4-2 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.0-0 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.0-3.4-0 - run: go mod download - name: Build executable run: PGO_VERSION='${{ github.sha }}' make build-postgres-operator @@ -118,7 +119,7 @@ jobs: run: make get-pgmonitor env: PGMONITOR_DIR: "${{ github.workspace }}/hack/tools/pgmonitor" - QUERIES_CONFIG_DIR: "${{ github.workspace }}/hack/tools/queries" + QUERIES_CONFIG_DIR: "${{ github.workspace }}/hack/tools/queries" # Start a Docker container with the working directory mounted. 
- name: Start PGO @@ -128,19 +129,20 @@ jobs: hack/create-kubeconfig.sh postgres-operator pgo docker run --detach --network host --read-only \ --volume "$(pwd):/mnt" --workdir '/mnt' --env 'PATH=/mnt/bin' \ + --env 'CHECK_FOR_UPGRADES=false' \ --env 'QUERIES_CONFIG_DIR=/mnt/hack/tools/queries' \ --env 'KUBECONFIG=hack/.kube/postgres-operator/pgo' \ - --env 'RELATED_IMAGE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-25' \ - --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-0' \ - --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-0' \ + --env 'RELATED_IMAGE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-31' \ + --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0' \ + --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0' \ --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest' \ --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest' \ - --env 'RELATED_IMAGE_POSTGRES_15=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.7-0' \ - --env 'RELATED_IMAGE_POSTGRES_15_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.7-3.3-0' \ - --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-0' \ - --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.3-0' \ - --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.4-0' \ - --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.6-0' \ + --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.3-2' \ + --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.4-2' \ + --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.0-0' \ + --env 'RELATED_IMAGE_POSTGRES_17_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.0-3.4-0' \ + --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.12-0' \ --env 'PGO_FEATURE_GATES=TablespaceVolumes=true' \ --name 'postgres-operator' ubuntu \ postgres-operator @@ -151,11 +153,11 @@ jobs: - run: make generate-kuttl env: - KUTTL_PG_UPGRADE_FROM_VERSION: '15' - KUTTL_PG_UPGRADE_TO_VERSION: '16' - KUTTL_PG_VERSION: '15' + KUTTL_PG_UPGRADE_FROM_VERSION: '16' + KUTTL_PG_UPGRADE_TO_VERSION: '17' + KUTTL_PG_VERSION: '16' KUTTL_POSTGIS_VERSION: '3.4' - KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-0' + KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2' - run: | make check-kuttl && exit failed=$? 
diff --git a/.github/workflows/trivy-pr-scan.yaml b/.github/workflows/trivy.yaml similarity index 60% rename from .github/workflows/trivy-pr-scan.yaml rename to .github/workflows/trivy.yaml index 2d1ab30fd1..2a16e4929c 100644 --- a/.github/workflows/trivy-pr-scan.yaml +++ b/.github/workflows/trivy.yaml @@ -1,17 +1,38 @@ -# Uses Trivy to scan every pull request, rejecting those with severe, fixable vulnerabilities. -# Scans on PR to master and weekly with same behavior. name: Trivy on: pull_request: - branches: - - master push: branches: - - master + - main + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + GOTOOLCHAIN: local jobs: - scan: + licenses: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + # Trivy needs a populated Go module cache to detect Go module licenses. + - uses: actions/setup-go@v5 + with: { go-version: stable } + - run: go mod download + + # Report success only when detected licenses are listed in [/trivy.yaml]. + - name: Scan licenses + uses: aquasecurity/trivy-action@0.28.0 + env: + TRIVY_DEBUG: true + with: + scan-type: filesystem + scanners: license + exit-code: 1 + + vulnerabilities: if: ${{ github.repository == 'CrunchyData/postgres-operator' }} permissions: @@ -28,23 +49,25 @@ jobs: # and is a convenience/redundant effort for those who prefer to # read logs and/or if anything goes wrong with the upload. - name: Log all detected vulnerabilities - uses: aquasecurity/trivy-action@master + uses: aquasecurity/trivy-action@0.28.0 with: - scan-type: fs + scan-type: filesystem hide-progress: true ignore-unfixed: true - + scanners: secret,vuln + # Upload actionable results to the GitHub Security tab. # Pull request checks fail according to repository settings. # - https://docs.github.com/en/code-security/code-scanning/integrating-with-code-scanning/uploading-a-sarif-file-to-github # - https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning - name: Report actionable vulnerabilities - uses: aquasecurity/trivy-action@master + uses: aquasecurity/trivy-action@0.28.0 with: - scan-type: fs + scan-type: filesystem ignore-unfixed: true format: 'sarif' output: 'trivy-results.sarif' + scanners: secret,vuln - name: Upload Trivy scan results to GitHub Security tab uses: github/codeql-action/upload-sarif@v3 diff --git a/.gitignore b/.gitignore index 2fa6186778..dcfd7074a3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ .DS_Store /vendor/ /testing/kuttl/e2e-generated*/ +gke_gcloud_auth_plugin_cache diff --git a/.golangci.next.yaml b/.golangci.next.yaml index 4de8886ce7..95b3f63347 100644 --- a/.golangci.next.yaml +++ b/.golangci.next.yaml @@ -9,11 +9,11 @@ linters: disable-all: true enable: - contextcheck + - err113 - errchkjson - gocritic - godot - godox - - goerr113 - gofumpt - gosec # exclude-use-default - nilnil diff --git a/.golangci.yaml b/.golangci.yaml index 4983bbee85..87a6ed0464 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -6,9 +6,9 @@ linters: - errchkjson - gci - gofumpt - - scopelint enable: - depguard + - goheader - gomodguard - gosimple - importas @@ -44,6 +44,15 @@ linters-settings: exhaustive: default-signifies-exhaustive: true + goheader: + template: |- + Copyright {{ DATES }} Crunchy Data Solutions, Inc. 
+ + SPDX-License-Identifier: Apache-2.0 + values: + regexp: + DATES: '((201[7-9]|202[0-3]) - 2024|2024)' + goimports: local-prefixes: github.com/crunchydata/postgres-operator @@ -58,6 +67,11 @@ linters-settings: k8s.io/kubernetes is for managing dependencies of the Kubernetes project, i.e. building kubelet and kubeadm. + gosec: + excludes: + # Flags for potentially-unsafe casting of ints, similar problem to globally-disabled G103 + - G115 + importas: alias: - pkg: k8s.io/api/(\w+)/(v[\w\w]+) @@ -68,6 +82,6 @@ linters-settings: alias: apierrors no-unaliased: true -run: - skip-dirs: +issues: + exclude-dirs: - pkg/generated diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 278beaffb1..e209f4e5a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,15 +13,11 @@ Thanks! We look forward to your contribution. # General Contributing Guidelines All ongoing development for an upcoming release gets committed to the -**`master`** branch. The `master` branch technically serves as the "development" -branch as well, but all code that is committed to the `master` branch should be +**`main`** branch. The `main` branch technically serves as the "development" +branch as well, but all code that is committed to the `main` branch should be considered _stable_, even if it is part of an ongoing release cycle. -All fixes for a supported release should be committed to the supported release -branch. For example, the 4.3 release is maintained on the `REL_4_3` branch. -Please see the section on _Supported Releases_ for more information. - -Ensure any changes are clear and well-documented. When we say "well-documented": +Ensure any changes are clear and well-documented: - If the changes include code, ensure all additional code has corresponding documentation in and around it. This includes documenting the definition of @@ -32,10 +28,7 @@ summarize how. Avoid simply repeating details from declarations,. When in doubt, favor overexplaining to underexplaining. - Code comments should be consistent with their language conventions. For -example, please use GoDoc conventions for Go source code. - -- Any new features must have corresponding user documentation. Any removed -features must have their user documentation removed from the documents. +example, please use `gofmt` [conventions](https://go.dev/doc/comment) for Go source code. - Do not submit commented-out code. If the code does not need to be used anymore, please remove it. @@ -62,12 +55,7 @@ All commits must either be rebased in atomic order or squashed (if the squashed commit is considered atomic). Merge commits are not accepted. All conflicts must be resolved prior to pushing changes. -**All pull requests should be made from the `master` branch** unless it is a fix -for a specific supported release. - -Once a major or minor release is made, no new features are added into the -release branch, only bug fixes. Any new features are added to the `master` -branch until the time that said new features are released. +**All pull requests should be made from the `main` branch.** # Commit Messages @@ -90,8 +78,7 @@ If you wish to tag a GitHub issue or another project management tracker, please do so at the bottom of the commit message, and make it clearly labeled like so: ``` -Issue: #123 -Issue: [sc-1234] +Issue: CrunchyData/postgres-operator#123 ``` # Submitting Pull Requests @@ -100,102 +87,23 @@ All work should be made in your own repository fork. 
When you believe your work is ready to be committed, please follow the guidance below for creating a pull request. -## Upcoming Releases / Features - -Ongoing work for new features should occur in branches off of the `master` -branch. It is suggested, but not required, that the branch name should reflect -that this is for an upcoming release, i.e. `upstream/branch-name` where the -`branch-name` is something descriptive for what you're working on. - -## Supported Releases / Fixes - -While not required, it is recommended to make your branch name along the lines -of: `REL_X_Y/branch-name` where the `branch-name` is something descriptive -for what you're working on. - -# Releases & Versioning - -Overall, release tags attempt to follow the -[semantic versioning](https://semver.org) scheme. - -"Supported releases" (described in the next section) occur on "minor" release -branches (e.g. the `x.y` portion of the `x.y.z`). - -One or more "patch" releases can occur after a minor release. A patch release is -used to fix bugs and other issues that may be found after a supported release. - -Fixes found on the `master` branch can be backported to a support release -branch. Any fixes for a supported release must have a pull request off of the -supported release branch, which is detailed below. - -## Supported Releases +## Upcoming Features -When a "minor" release is made, the release is stamped using the `vx.y.0` format -as denoted above, and a branch is created with the name `REL_X_Y`. Once a -minor release occurs, no new features are added to the `REL_X_Y` branch. -However, bug fixes can (and if found, should) be added to this branch. +Ongoing work for new features should occur in branches off of the `main` +branch. -To contribute a bug fix to a supported release, please make a pull request off -of the supported release branch. For instance, if you find a bug in the 4.3 -release, then you would make a pull request off of the `REL_4_3` branch. +## Unsupported Branches -## Unsupported Releases - -When a release is no longer supported, the branch will be renamed following the +When a release branch is no longer supported, it will be renamed following the pattern `REL_X_Y_FINAL` with the key suffix being _FINAL_. For example, `REL_3_2_FINAL` indicates that the 3.2 release is no longer supported. Nothing should ever be pushed to a `REL_X_Y_FINAL` branch once `FINAL` is on the branch name. -## Alpha, Beta, Release Candidate Releases - -At any point in the release cycle for a new release, there could exist one or -more alpha, beta, or release candidate (RC) release. Alpha, beta, and release -candidates **should not be used in production environments**. - -Alpha is the early stage of a release cycle and is typically made to test the -mechanics of an upcoming release. These should be considered relatively -unstable. The format for an alpha release tag is `v4.3.0-alpha.1`, which in this -case indicates it is the first alpha release for 4.3. - -Beta occurs during the later stage of a release cycle. At this point, the -release should be considered feature complete and the beta is used to -distribute, test, and collect feedback on the upcoming release. The betas should -be considered unstable, but as mentioned feature complete. The format for an -beta release tag is `v4.3.0-beta.1`, which in this case indicates it is the -first beta release for 4.3. - -Release candidates (RCs) occur just before a release. 
A release candidate should -be considered stable, and is typically used for a final round of bug checking -and testing. Multiple release candidates can occur in the event of serious bugs. -The format for a release candidate tag is `v4.3.0-rc.1`, which in this -case indicates it is the first release candidate for 4.3. - -**After a major or minor release, no alpha, beta, or release candidate releases -are supported**. In fact, any newer release of an alpha, beta, or RC immediately -deprecates any older alpha, beta or RC. (Naturally, a beta deprecates an alpha, -and a RC deprecates a beta). - -If you are testing on an older alpha, beta or RC, bug reports will not be -accepted. Please ensure you are testing on the latest version. - # Testing -We greatly appreciate any and all testing for the project. When testing, please -be sure you do the following: - -- If testing against a release, ensure your tests are performed against the -latest minor version (the last number in the release denotes the minor version, -e.g. the "3" in the 4.3.3) -- If testing against a pre-release (alpha, beta, RC), ensure your tests are -performed against latest version -- If testing against a development (`master`) or release (`REL_X_Y`) branch, -ensure your tests are performed against the latest commit - -Please do not test against unsupported versions (e.g. any release that is marked -final). - +We greatly appreciate any and all testing for the project. There are several ways to help with the testing effort: - Manual testing: testing particular features with a series of manual commands diff --git a/Makefile b/Makefile index 5313ca0cb8..37aca1a37e 100644 --- a/Makefile +++ b/Makefile @@ -6,25 +6,21 @@ PGO_IMAGE_URL ?= https://www.crunchydata.com/products/crunchy-postgresql-for-kub PGO_IMAGE_PREFIX ?= localhost PGMONITOR_DIR ?= hack/tools/pgmonitor -PGMONITOR_VERSION ?= v4.11.0 +PGMONITOR_VERSION ?= v5.1.1 QUERIES_CONFIG_DIR ?= hack/tools/queries +EXTERNAL_SNAPSHOTTER_DIR ?= hack/tools/external-snapshotter +EXTERNAL_SNAPSHOTTER_VERSION ?= v8.0.1 + # Buildah's "build" used to be "bud". Use the alias to be compatible for a while. BUILDAH_BUILD ?= buildah bud -DEBUG_BUILD ?= false GO ?= go -GO_BUILD = $(GO_CMD) build -trimpath -GO_CMD = $(GO_ENV) $(GO) +GO_BUILD = $(GO) build GO_TEST ?= $(GO) test KUTTL ?= kubectl-kuttl KUTTL_TEST ?= $(KUTTL) test -# Disable optimizations if creating a debug build -ifeq ("$(DEBUG_BUILD)", "true") - GO_BUILD = $(GO_CMD) build -gcflags='all=-N -l' -endif - ##@ General # The help target prints out all targets with their descriptions organized @@ -59,18 +55,23 @@ get-pgmonitor: cp -r '$(PGMONITOR_DIR)/postgres_exporter/common/.' '${QUERIES_CONFIG_DIR}' cp '$(PGMONITOR_DIR)/postgres_exporter/linux/queries_backrest.yml' '${QUERIES_CONFIG_DIR}' +.PHONY: get-external-snapshotter +get-external-snapshotter: + git -C '$(dir $(EXTERNAL_SNAPSHOTTER_DIR))' clone https://github.com/kubernetes-csi/external-snapshotter.git || git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' fetch origin + @git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' checkout '$(EXTERNAL_SNAPSHOTTER_VERSION)' + @git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' config pull.ff only + .PHONY: clean clean: ## Clean resources clean: clean-deprecated rm -f bin/postgres-operator - rm -f config/rbac/role.yaml rm -rf licenses/*/ [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated [ ! -d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other - rm -rf build/crd/generated build/crd/*/generated [ ! 
-f hack/tools/setup-envtest ] || rm hack/tools/setup-envtest [ ! -d hack/tools/envtest ] || { chmod -R u+w hack/tools/envtest && rm -r hack/tools/envtest; } [ ! -d hack/tools/pgmonitor ] || rm -rf hack/tools/pgmonitor + [ ! -d hack/tools/external-snapshotter ] || rm -rf hack/tools/external-snapshotter [ ! -n "$$(ls hack/tools)" ] || rm -r hack/tools/* [ ! -d hack/.kube ] || rm -r hack/.kube @@ -91,6 +92,8 @@ clean-deprecated: ## Clean deprecated resources @# crunchy-postgres-exporter used to live in this repo [ ! -d bin/crunchy-postgres-exporter ] || rm -r bin/crunchy-postgres-exporter [ ! -d build/crunchy-postgres-exporter ] || rm -r build/crunchy-postgres-exporter + @# CRDs used to require patching + [ ! -d build/crd ] || rm -r build/crd ##@ Deployment @@ -120,7 +123,7 @@ undeploy: ## Undeploy the PostgreSQL Operator .PHONY: deploy-dev deploy-dev: ## Deploy the PostgreSQL Operator locally -deploy-dev: PGO_FEATURE_GATES ?= "TablespaceVolumes=true" +deploy-dev: PGO_FEATURE_GATES ?= "TablespaceVolumes=true,VolumeSnapshots=true" deploy-dev: get-pgmonitor deploy-dev: build-postgres-operator deploy-dev: createnamespaces @@ -133,6 +136,9 @@ deploy-dev: createnamespaces CHECK_FOR_UPGRADES='$(if $(CHECK_FOR_UPGRADES),$(CHECK_FOR_UPGRADES),false)' \ KUBECONFIG=hack/.kube/postgres-operator/pgo \ PGO_NAMESPACE='postgres-operator' \ + PGO_INSTALLER='deploy-dev' \ + PGO_INSTALLER_ORIGIN='postgres-operator-repo' \ + BUILD_SOURCE='build-postgres-operator' \ $(shell kubectl kustomize ./config/dev | \ sed -ne '/^kind: Deployment/,/^---/ { \ /RELATED_IMAGE_/ { N; s,.*\(RELATED_[^[:space:]]*\).*value:[[:space:]]*\([^[:space:]]*\),\1="\2",; p; }; \ @@ -143,8 +149,9 @@ deploy-dev: createnamespaces ##@ Build - Binary .PHONY: build-postgres-operator build-postgres-operator: ## Build the postgres-operator binary - $(GO_BUILD) -ldflags '-X "main.versionString=$(PGO_VERSION)"' \ - -o bin/postgres-operator ./cmd/postgres-operator + CGO_ENABLED=1 $(GO_BUILD) $(\ + ) --ldflags '-X "main.versionString=$(PGO_VERSION)"' $(\ + ) --trimpath -o bin/postgres-operator ./cmd/postgres-operator ##@ Build - Images .PHONY: build-postgres-operator-image @@ -196,7 +203,7 @@ check: get-pgmonitor check-envtest: ## Run check using envtest and a mock kube api check-envtest: ENVTEST_USE = $(ENVTEST) --bin-dir=$(CURDIR)/hack/tools/envtest use $(ENVTEST_K8S_VERSION) check-envtest: SHELL = bash -check-envtest: get-pgmonitor tools/setup-envtest +check-envtest: get-pgmonitor tools/setup-envtest get-external-snapshotter @$(ENVTEST_USE) --print=overview && echo source <($(ENVTEST_USE) --print=env) && PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ $(GO_TEST) -count=1 -cover ./... 
@@ -207,7 +214,7 @@ check-envtest: get-pgmonitor tools/setup-envtest # make check-envtest-existing PGO_TEST_TIMEOUT_SCALE=1.2 .PHONY: check-envtest-existing check-envtest-existing: ## Run check using envtest and an existing kube api -check-envtest-existing: get-pgmonitor +check-envtest-existing: get-pgmonitor get-external-snapshotter check-envtest-existing: createnamespaces kubectl apply --server-side -k ./config/dev USE_EXISTING_CLUSTER=true PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ @@ -226,7 +233,7 @@ generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 15 generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 16 generate-kuttl: export KUTTL_PG_VERSION ?= 16 generate-kuttl: export KUTTL_POSTGIS_VERSION ?= 3.4 -generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-0 +generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1 generate-kuttl: export KUTTL_TEST_DELETE_NAMESPACE ?= kuttl-test-delete-namespace generate-kuttl: ## Generate kuttl tests [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated @@ -272,27 +279,7 @@ generate-crd: tools/controller-gen $(CONTROLLER) \ crd:crdVersions='v1' \ paths='./pkg/apis/...' \ - output:dir='build/crd/postgresclusters/generated' # build/crd/{plural}/generated/{group}_{plural}.yaml - @ - $(CONTROLLER) \ - crd:crdVersions='v1' \ - paths='./pkg/apis/...' \ - output:dir='build/crd/pgupgrades/generated' # build/crd/{plural}/generated/{group}_{plural}.yaml - @ - $(CONTROLLER) \ - crd:crdVersions='v1' \ - paths='./pkg/apis/...' \ - output:dir='build/crd/pgadmins/generated' # build/crd/{plural}/generated/{group}_{plural}.yaml - @ - $(CONTROLLER) \ - crd:crdVersions='v1' \ - paths='./pkg/apis/...' \ - output:dir='build/crd/crunchybridgeclusters/generated' # build/crd/{plural}/generated/{group}_{plural}.yaml - @ - kubectl kustomize ./build/crd/postgresclusters > ./config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml - kubectl kustomize ./build/crd/pgupgrades > ./config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml - kubectl kustomize ./build/crd/pgadmins > ./config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml - kubectl kustomize ./build/crd/crunchybridgeclusters > ./config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml + output:dir='config/crd/bases' # {directory}/{group}_{plural}.yaml .PHONY: generate-deepcopy generate-deepcopy: ## Generate DeepCopy functions @@ -305,10 +292,9 @@ generate-deepcopy: tools/controller-gen generate-rbac: ## Generate RBAC generate-rbac: tools/controller-gen $(CONTROLLER) \ - rbac:roleName='generated' \ - paths='./internal/...' \ - output:dir='config/rbac' # ${directory}/role.yaml - ./hack/generate-rbac.sh 'config/rbac' + rbac:roleName='postgres-operator' \ + paths='./cmd/...' paths='./internal/...' 
\ + output:dir='config/rbac' # {directory}/role.yaml ##@ Tools @@ -323,7 +309,7 @@ endef CONTROLLER ?= hack/tools/controller-gen tools: tools/controller-gen tools/controller-gen: - $(call go-get-tool,$(CONTROLLER),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.0) + $(call go-get-tool,$(CONTROLLER),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4) ENVTEST ?= hack/tools/setup-envtest tools: tools/setup-envtest diff --git a/README.md b/README.md index 9483c7c8b5..357734566e 100644 --- a/README.md +++ b/README.md @@ -185,22 +185,18 @@ In addition to the above, the geospatially enhanced PostgreSQL + PostGIS contain For more information about which versions of the PostgreSQL Operator include which components, please visit the [compatibility](https://access.crunchydata.com/documentation/postgres-operator/v5/references/components/) section of the documentation. -## Supported Platforms +## [Supported Platforms](https://access.crunchydata.com/documentation/postgres-operator/latest/overview/supported-platforms) PGO, the Postgres Operator from Crunchy Data, is tested on the following platforms: -- Kubernetes 1.25-1.30 -- OpenShift 4.10-4.15 +- Kubernetes +- OpenShift - Rancher - Google Kubernetes Engine (GKE), including Anthos - Amazon EKS - Microsoft AKS - VMware Tanzu -This list only includes the platforms that the Postgres Operator is specifically -tested on as part of the release process: PGO works on other Kubernetes -distributions as well. - # Contributing to the Project Want to contribute to the PostgreSQL Operator project? Great! We've put together @@ -214,7 +210,7 @@ Once you are ready to submit a Pull Request, please ensure you do the following: that you have followed the commit message format, added testing where appropriate, documented your changes, etc. 1. Open up a pull request based upon the guidelines. If you are adding a new - feature, please open up the pull request on the `master` branch. + feature, please open up the pull request on the `main` branch. 1. Please be as descriptive in your pull request as possible. 
If you are referencing an issue, please be sure to include the issue in your pull request diff --git a/build/crd/.gitignore b/build/crd/.gitignore deleted file mode 100644 index 83ad9d9191..0000000000 --- a/build/crd/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/crunchybridgeclusters/generated/ -/postgresclusters/generated/ -/pgupgrades/generated/ -/pgadmins/generated/ diff --git a/build/crd/crunchybridgeclusters/kustomization.yaml b/build/crd/crunchybridgeclusters/kustomization.yaml deleted file mode 100644 index 26454f3b07..0000000000 --- a/build/crd/crunchybridgeclusters/kustomization.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- generated/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml - -patches: -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: crunchybridgeclusters.postgres-operator.crunchydata.com -# The version below should match the version on the PostgresCluster CRD - patch: |- - - op: add - path: "/metadata/labels" - value: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest diff --git a/build/crd/pgadmins/kustomization.yaml b/build/crd/pgadmins/kustomization.yaml deleted file mode 100644 index ca67fb89fa..0000000000 --- a/build/crd/pgadmins/kustomization.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- generated/postgres-operator.crunchydata.com_pgadmins.yaml - -patches: -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgadmins.postgres-operator.crunchydata.com - path: todos.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgadmins.postgres-operator.crunchydata.com -# The version below should match the version on the PostgresCluster CRD - patch: |- - - op: add - path: "/metadata/labels" - value: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest diff --git a/build/crd/pgadmins/todos.yaml b/build/crd/pgadmins/todos.yaml deleted file mode 100644 index 5412d0ad21..0000000000 --- a/build/crd/pgadmins/todos.yaml +++ /dev/null @@ -1,23 +0,0 @@ -- op: add - path: /work - value: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/imagePullSecrets/items/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/files/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/files/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/ldapBindPassword/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/users/items/properties/passwordRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/configDatabaseURI/properties/name/description -- op: remove - path: /work diff --git a/build/crd/pgupgrades/kustomization.yaml b/build/crd/pgupgrades/kustomization.yaml deleted file mode 100644 index 260b7e42cd..0000000000 --- a/build/crd/pgupgrades/kustomization.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- generated/postgres-operator.crunchydata.com_pgupgrades.yaml - -patches: -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgupgrades.postgres-operator.crunchydata.com - path: todos.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: pgupgrades.postgres-operator.crunchydata.com -# The version below should match the version on the PostgresCluster CRD - patch: |- - - op: add - path: "/metadata/labels" - value: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest diff --git a/build/crd/pgupgrades/todos.yaml b/build/crd/pgupgrades/todos.yaml deleted file mode 100644 index c0d2202859..0000000000 --- a/build/crd/pgupgrades/todos.yaml +++ /dev/null @@ -1,8 +0,0 @@ -- op: add - path: /work - value: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/imagePullSecrets/items/properties/name/description -- op: remove - path: /work diff --git a/build/crd/postgresclusters/condition.yaml b/build/crd/postgresclusters/condition.yaml deleted file mode 100644 index 577787b520..0000000000 --- a/build/crd/postgresclusters/condition.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# PostgresCluster "v1beta1" is in "/spec/versions/0" - -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/status/properties/conditions/items/description - value: Condition contains details for one aspect of the current state of this API Resource. -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/status/properties/conditions/items/properties/type/description - value: type of condition in CamelCase. -- op: add - path: "/spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items\ - /properties/securityContext/properties/seccompProfile/properties/type/description" - value: >- - type indicates which kind of seccomp profile will be applied. 
Valid options are: - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. -- op: add - path: "/spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties\ - /containers/items/properties/securityContext/properties/seccompProfile/properties/type/description" - value: >- - type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. diff --git a/build/crd/postgresclusters/kustomization.yaml b/build/crd/postgresclusters/kustomization.yaml deleted file mode 100644 index eb8cb6540f..0000000000 --- a/build/crd/postgresclusters/kustomization.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- generated/postgres-operator.crunchydata.com_postgresclusters.yaml - -patchesJson6902: -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - path: condition.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - path: todos.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - path: validation.yaml -- target: - group: apiextensions.k8s.io - version: v1 - kind: CustomResourceDefinition - name: postgresclusters.postgres-operator.crunchydata.com - patch: |- - - op: add - path: "/metadata/labels" - value: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest diff --git a/build/crd/postgresclusters/todos.yaml b/build/crd/postgresclusters/todos.yaml deleted file mode 100644 index daa05249a0..0000000000 --- a/build/crd/postgresclusters/todos.yaml +++ /dev/null @@ -1,89 +0,0 @@ -- op: add - path: /work - value: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/configuration/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/configuration/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/repoHost/properties/sshConfigMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/repoHost/properties/sshSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/files/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/config/properties/files/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/customReplicationTLSSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/customTLSSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/dataSource/properties/pgbackrest/properties/configuration/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/dataSource/properties/pgbackrest/properties/configuration/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/imagePullSecrets/items/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items/properties/env/items/properties/valueFrom/properties/configMapKeyRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items/properties/env/items/properties/valueFrom/properties/secretKeyRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items/properties/envFrom/items/properties/configMapRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/containers/items/properties/envFrom/items/properties/secretRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/monitoring/properties/pgmonitor/properties/exporter/properties/configuration/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/monitoring/properties/pgmonitor/properties/exporter/properties/configuration/items/properties/secret/properties/name/description -- op: copy - from: 
/work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/monitoring/properties/pgmonitor/properties/exporter/properties/customTLSSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/config/properties/files/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/config/properties/files/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/containers/items/properties/env/items/properties/valueFrom/properties/configMapKeyRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/containers/items/properties/env/items/properties/valueFrom/properties/secretKeyRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/containers/items/properties/envFrom/items/properties/configMapRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/containers/items/properties/envFrom/items/properties/secretRef/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/proxy/properties/pgBouncer/properties/customTLSSecret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/userInterface/properties/pgAdmin/properties/config/properties/files/items/properties/configMap/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/userInterface/properties/pgAdmin/properties/config/properties/files/items/properties/secret/properties/name/description -- op: copy - from: /work - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/userInterface/properties/pgAdmin/properties/config/properties/ldapBindPassword/properties/name/description -- op: remove - path: /work diff --git a/build/crd/postgresclusters/validation.yaml b/build/crd/postgresclusters/validation.yaml deleted file mode 100644 index c619c4f11d..0000000000 --- a/build/crd/postgresclusters/validation.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# PostgresCluster "v1beta1" is in "/spec/versions/0" - -# Make a temporary workspace. -- { op: add, path: /work, value: {} } - -# Containers should not run with a root GID. -# - https://kubernetes.io/docs/concepts/security/pod-security-standards/ -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/supplementalGroups/items/minimum - value: 1 - -# Supplementary GIDs must fit within int32. -# - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L3659-L3663 -# - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L3923-L3927 -- op: add - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/supplementalGroups/items/maximum - value: 2147483647 # math.MaxInt32 - -# Make a copy of a standard PVC properties. 
-- op: copy - from: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/dataVolumeClaimSpec/properties - path: /work/pvcSpecProperties - -# Start an empty list when a standard PVC has no required fields. -- op: test - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/dataVolumeClaimSpec/required - value: null -- op: add - path: /work/pvcSpecRequired - value: [] - -# PersistentVolumeClaims must have an access mode. -# - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L1893-L1895 -# - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L2073-L2075 -- op: add - path: /work/pvcSpecRequired/- - value: accessModes -- op: add - path: /work/pvcSpecProperties/accessModes/minItems - value: 1 - -# PersistentVolumeClaims must have a storage request. -# - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L1904-L1911 -# - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L2101-L2108 -- op: add - path: /work/pvcSpecRequired/- - value: resources -- op: add - path: /work/pvcSpecProperties/resources/required - value: [requests] -- op: add - path: /work/pvcSpecProperties/resources/properties/requests/required - value: [storage] - -# Replace PVCs throughout the CRD. -- op: copy - from: /work/pvcSpecProperties - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/dataVolumeClaimSpec/properties -- op: copy - from: /work/pvcSpecRequired - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/dataVolumeClaimSpec/required -- op: copy - from: /work/pvcSpecProperties - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/walVolumeClaimSpec/properties -- op: copy - from: /work/pvcSpecRequired - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/instances/items/properties/walVolumeClaimSpec/required -- op: copy - from: /work/pvcSpecProperties - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/repos/items/properties/volume/properties/volumeClaimSpec/properties -- op: copy - from: /work/pvcSpecRequired - path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/backups/properties/pgbackrest/properties/repos/items/properties/volume/properties/volumeClaimSpec/required - -# Remove the temporary workspace. -- { op: remove, path: /work } diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index 78e88b4031..b2f8ae49b6 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -1,32 +1,24 @@ -package main - -/* -Copyright 2017 - 2024 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ +package main import ( "context" + "fmt" "net/http" "os" + "strconv" "strings" + "time" + "unicode" - "github.com/go-logr/logr" "go.opentelemetry.io/otel" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/discovery" "k8s.io/client-go/rest" - cruntime "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/healthz" "github.com/crunchydata/postgres-operator/internal/bridge" "github.com/crunchydata/postgres-operator/internal/bridge/crunchybridgecluster" @@ -34,11 +26,13 @@ import ( "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/controller/standalone_pgadmin" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/upgradecheck" - "github.com/crunchydata/postgres-operator/internal/util" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) var versionString string @@ -51,18 +45,83 @@ func assertNoError(err error) { } func initLogging() { - // Configure a singleton that treats logr.Logger.V(1) as logrus.DebugLevel. + // Configure a singleton that treats logging.Logger.V(1) as logrus.DebugLevel. var verbosity int if strings.EqualFold(os.Getenv("CRUNCHY_DEBUG"), "true") { verbosity = 1 } logging.SetLogSink(logging.Logrus(os.Stdout, versionString, 1, verbosity)) + + global := logging.FromContext(context.Background()) + runtime.SetLogger(global) +} + +//+kubebuilder:rbac:groups="coordination.k8s.io",resources="leases",verbs={get,create,update,watch} + +func initManager() (runtime.Options, error) { + log := logging.FromContext(context.Background()) + + options := runtime.Options{} + options.Cache.SyncPeriod = initialize.Pointer(time.Hour) + + options.HealthProbeBindAddress = ":8081" + + // Enable leader elections when configured with a valid Lease.coordination.k8s.io name. 
+ // - https://docs.k8s.io/concepts/architecture/leases + // - https://releases.k8s.io/v1.30.0/pkg/apis/coordination/validation/validation.go#L26 + if lease := os.Getenv("PGO_CONTROLLER_LEASE_NAME"); len(lease) > 0 { + if errs := validation.IsDNS1123Subdomain(lease); len(errs) > 0 { + return options, fmt.Errorf("value for PGO_CONTROLLER_LEASE_NAME is invalid: %v", errs) + } + + options.LeaderElection = true + options.LeaderElectionID = lease + options.LeaderElectionNamespace = os.Getenv("PGO_NAMESPACE") + } + + // Check PGO_TARGET_NAMESPACE for backwards compatibility with + // "singlenamespace" installations + singlenamespace := strings.TrimSpace(os.Getenv("PGO_TARGET_NAMESPACE")) + + // Check PGO_TARGET_NAMESPACES for non-cluster-wide, multi-namespace + // installations + multinamespace := strings.TrimSpace(os.Getenv("PGO_TARGET_NAMESPACES")) + + // Initialize DefaultNamespaces if any target namespaces are set + if len(singlenamespace) > 0 || len(multinamespace) > 0 { + options.Cache.DefaultNamespaces = map[string]runtime.CacheConfig{} + } + + if len(singlenamespace) > 0 { + options.Cache.DefaultNamespaces[singlenamespace] = runtime.CacheConfig{} + } + + if len(multinamespace) > 0 { + for _, namespace := range strings.FieldsFunc(multinamespace, func(c rune) bool { + return c != '-' && !unicode.IsLetter(c) && !unicode.IsNumber(c) + }) { + options.Cache.DefaultNamespaces[namespace] = runtime.CacheConfig{} + } + } + + options.Controller.GroupKindConcurrency = map[string]int{ + "PostgresCluster." + v1beta1.GroupVersion.Group: 2, + } + + if s := os.Getenv("PGO_WORKERS"); s != "" { + if i, err := strconv.Atoi(s); err == nil && i > 0 { + options.Controller.GroupKindConcurrency["PostgresCluster."+v1beta1.GroupVersion.Group] = i + } else { + log.Error(err, "PGO_WORKERS must be a positive number") + } + } + + return options, nil } func main() { - // Set any supplied feature gates; panic on any unrecognized feature gate - err := util.AddAndSetFeatureGates(os.Getenv("PGO_FEATURE_GATES")) - assertNoError(err) + // This context is canceled by SIGINT, SIGTERM, or by calling shutdown. + ctx, shutdown := context.WithCancel(runtime.SignalHandler()) otelFlush, err := initOpenTelemetry() assertNoError(err) @@ -70,16 +129,12 @@ func main() { initLogging() - // create a context that will be used to stop all controllers on a SIGTERM or SIGINT - ctx := cruntime.SetupSignalHandler() - ctx, shutdown := context.WithCancel(ctx) log := logging.FromContext(ctx) log.V(1).Info("debug flag set to true") - log.Info("feature gates enabled", - "PGO_FEATURE_GATES", os.Getenv("PGO_FEATURE_GATES")) - - cruntime.SetLogger(log) + features := feature.NewGate() + assertNoError(features.Set(os.Getenv("PGO_FEATURE_GATES"))) + log.Info("feature gates enabled", "PGO_FEATURE_GATES", features.String()) cfg, err := runtime.GetConfig() assertNoError(err) @@ -91,7 +146,18 @@ func main() { // deprecation warnings when using an older version of a resource for backwards compatibility). rest.SetDefaultWarningHandler(rest.NoWarnings{}) - mgr, err := runtime.CreateRuntimeManager(os.Getenv("PGO_TARGET_NAMESPACE"), cfg, false) + options, err := initManager() + assertNoError(err) + + // Add to the Context that Manager passes to Reconciler.Start, Runnable.Start, + // and eventually Reconciler.Reconcile. 
+ options.BaseContext = func() context.Context { + ctx := context.Background() + ctx = feature.NewContext(ctx, features) + return ctx + } + + mgr, err := runtime.NewManager(cfg, options) assertNoError(err) openshift := isOpenshift(cfg) @@ -102,12 +168,12 @@ func main() { registrar, err := registration.NewRunner(os.Getenv("RSA_KEY"), os.Getenv("TOKEN_PATH"), shutdown) assertNoError(err) assertNoError(mgr.Add(registrar)) - _ = registrar.CheckToken() + token, _ := registrar.CheckToken() // add all PostgreSQL Operator controllers to the runtime manager addControllersToManager(mgr, openshift, log, registrar) - if util.DefaultMutableFeatureGate.Enabled(util.BridgeIdentifiers) { + if features.Enabled(feature.BridgeIdentifiers) { constructor := func() *bridge.Client { client := bridge.NewClient(os.Getenv("PGO_BRIDGE_URL"), versionString) client.Transport = otelTransportWrapper()(http.DefaultTransport) @@ -122,12 +188,22 @@ func main() { if !upgradeCheckingDisabled { log.Info("upgrade checking enabled") // get the URL for the check for upgrades endpoint if set in the env - assertNoError(upgradecheck.ManagedScheduler(mgr, - openshift, os.Getenv("CHECK_FOR_UPGRADES_URL"), versionString)) + assertNoError( + upgradecheck.ManagedScheduler( + mgr, + openshift, + os.Getenv("CHECK_FOR_UPGRADES_URL"), + versionString, + token, + )) } else { log.Info("upgrade checking disabled") } + // Enable health probes + assertNoError(mgr.AddHealthzCheck("health", healthz.Ping)) + assertNoError(mgr.AddReadyzCheck("check", healthz.Ping)) + log.Info("starting controller runtime manager and will wait for signal to exit") assertNoError(mgr.Start(ctx)) @@ -136,7 +212,7 @@ func main() { // addControllersToManager adds all PostgreSQL Operator controllers to the provided controller // runtime manager. -func addControllersToManager(mgr manager.Manager, openshift bool, log logr.Logger, reg registration.Registration) { +func addControllersToManager(mgr runtime.Manager, openshift bool, log logging.Logger, reg registration.Registration) { pgReconciler := &postgrescluster.Reconciler{ Client: mgr.GetClient(), IsOpenShift: openshift, diff --git a/cmd/postgres-operator/main_test.go b/cmd/postgres-operator/main_test.go new file mode 100644 index 0000000000..f369ce6bd3 --- /dev/null +++ b/cmd/postgres-operator/main_test.go @@ -0,0 +1,118 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "reflect" + "testing" + "time" + + "gotest.tools/v3/assert" + "gotest.tools/v3/assert/cmp" +) + +func TestInitManager(t *testing.T) { + t.Run("Defaults", func(t *testing.T) { + options, err := initManager() + assert.NilError(t, err) + + if assert.Check(t, options.Cache.SyncPeriod != nil) { + assert.Equal(t, *options.Cache.SyncPeriod, time.Hour) + } + + assert.Assert(t, options.HealthProbeBindAddress == ":8081") + + assert.DeepEqual(t, options.Controller.GroupKindConcurrency, + map[string]int{ + "PostgresCluster.postgres-operator.crunchydata.com": 2, + }) + + assert.Assert(t, options.Cache.DefaultNamespaces == nil) + assert.Assert(t, options.LeaderElection == false) + + { + options.Cache.SyncPeriod = nil + options.Controller.GroupKindConcurrency = nil + options.HealthProbeBindAddress = "" + + assert.Assert(t, reflect.ValueOf(options).IsZero(), + "expected remaining fields to be unset:\n%+v", options) + } + }) + + t.Run("PGO_CONTROLLER_LEASE_NAME", func(t *testing.T) { + t.Setenv("PGO_NAMESPACE", "test-namespace") + + t.Run("Invalid", func(t *testing.T) { + t.Setenv("PGO_CONTROLLER_LEASE_NAME", "INVALID_NAME") + + options, err := initManager() + assert.ErrorContains(t, err, "PGO_CONTROLLER_LEASE_NAME") + assert.ErrorContains(t, err, "invalid") + + assert.Assert(t, options.LeaderElection == false) + assert.Equal(t, options.LeaderElectionNamespace, "") + }) + + t.Run("Valid", func(t *testing.T) { + t.Setenv("PGO_CONTROLLER_LEASE_NAME", "valid-name") + + options, err := initManager() + assert.NilError(t, err) + assert.Assert(t, options.LeaderElection == true) + assert.Equal(t, options.LeaderElectionNamespace, "test-namespace") + assert.Equal(t, options.LeaderElectionID, "valid-name") + }) + }) + + t.Run("PGO_TARGET_NAMESPACE", func(t *testing.T) { + t.Setenv("PGO_TARGET_NAMESPACE", "some-such") + + options, err := initManager() + assert.NilError(t, err) + assert.Assert(t, cmp.Len(options.Cache.DefaultNamespaces, 1), + "expected only one configured namespace") + + assert.Assert(t, cmp.Contains(options.Cache.DefaultNamespaces, "some-such")) + }) + + t.Run("PGO_TARGET_NAMESPACES", func(t *testing.T) { + t.Setenv("PGO_TARGET_NAMESPACES", "some-such,another-one") + + options, err := initManager() + assert.NilError(t, err) + assert.Assert(t, cmp.Len(options.Cache.DefaultNamespaces, 2), + "expect two configured namespaces") + + assert.Assert(t, cmp.Contains(options.Cache.DefaultNamespaces, "some-such")) + assert.Assert(t, cmp.Contains(options.Cache.DefaultNamespaces, "another-one")) + }) + + t.Run("PGO_WORKERS", func(t *testing.T) { + t.Run("Invalid", func(t *testing.T) { + for _, v := range []string{"-3", "0", "3.14"} { + t.Setenv("PGO_WORKERS", v) + + options, err := initManager() + assert.NilError(t, err) + assert.DeepEqual(t, options.Controller.GroupKindConcurrency, + map[string]int{ + "PostgresCluster.postgres-operator.crunchydata.com": 2, + }) + } + }) + + t.Run("Valid", func(t *testing.T) { + t.Setenv("PGO_WORKERS", "19") + + options, err := initManager() + assert.NilError(t, err) + assert.DeepEqual(t, options.Controller.GroupKindConcurrency, + map[string]int{ + "PostgresCluster.postgres-operator.crunchydata.com": 19, + }) + }) + }) +} diff --git a/cmd/postgres-operator/open_telemetry.go b/cmd/postgres-operator/open_telemetry.go index 94050b987e..2c9eedc135 100644 --- a/cmd/postgres-operator/open_telemetry.go +++ b/cmd/postgres-operator/open_telemetry.go @@ -1,19 +1,8 @@ -package main - -/* -Copyright 2021 - 2024 Crunchy 
Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +package main import ( "context" diff --git a/config/README.md b/config/README.md index d1ecf9d1f8..73d2e59e6f 100644 --- a/config/README.md +++ b/config/README.md @@ -1,16 +1,7 @@ @@ -19,9 +10,6 @@ - The `default` target installs the operator in the `postgres-operator` namespace and configures it to manage resources in all namespaces. -- The `singlenamespace` target installs the operator in the `postgres-operator` - namespace and configures it to manage resources in that same namespace. - diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml index e0bff0cc56..82db84b466 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -1,12 +1,9 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null - labels: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest + controller-gen.kubebuilder.io/version: v0.16.4 name: crunchybridgeclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -24,44 +21,49 @@ spec: API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: CrunchyBridgeClusterSpec defines the desired state of CrunchyBridgeCluster + description: |- + CrunchyBridgeClusterSpec defines the desired state of CrunchyBridgeCluster to be managed by Crunchy Data Bridge properties: clusterName: - description: The name of the cluster --- According to Bridge API/GUI - errors, "Field name should be between 5 and 50 characters in length, - containing only unicode characters, unicode numbers, hyphens, spaces, - or underscores, and starting with a character", and ending with - a character or number. + description: The name of the cluster maxLength: 50 minLength: 5 pattern: ^[A-Za-z][A-Za-z0-9\-_ ]*[A-Za-z0-9]$ type: string isHa: - description: Whether the cluster is high availability, meaning that - it has a secondary it can fail over to quickly in case the primary - becomes unavailable. + description: |- + Whether the cluster is high availability, + meaning that it has a secondary it can fail over to quickly + in case the primary becomes unavailable. type: boolean isProtected: - description: Whether the cluster is protected. Protected clusters - can't be destroyed until their protected flag is removed + description: |- + Whether the cluster is protected. Protected clusters can't be destroyed until + their protected flag is removed type: boolean majorVersion: - description: The ID of the cluster's major Postgres version. Currently - Bridge offers 13-16 - maximum: 16 + description: |- + The ID of the cluster's major Postgres version. + Currently Bridge offers 13-17 + maximum: 17 minimum: 13 type: integer metadata: @@ -81,8 +83,9 @@ spec: and memory. type: string provider: - description: The cloud provider where the cluster is located. Currently - Bridge offers aws, azure, and gcp only + description: |- + The cloud provider where the cluster is located. + Currently Bridge offers aws, azure, and gcp only enum: - aws - azure @@ -98,16 +101,17 @@ spec: - message: immutable rule: self == oldSelf roles: - description: Roles for which to create Secrets that contain their - credentials which are retrieved from the Bridge API. An empty list - creates no role secrets. Removing a role from this list does NOT - drop the role nor revoke their access, but it will delete that role's - secret from the kube cluster. + description: |- + Roles for which to create Secrets that contain their credentials which + are retrieved from the Bridge API. An empty list creates no role secrets. + Removing a role from this list does NOT drop the role nor revoke their + access, but it will delete that role's secret from the kube cluster. items: properties: name: - description: 'Name of the role within Crunchy Bridge. More info: - https://docs.crunchybridge.com/concepts/users' + description: |- + Name of the role within Crunchy Bridge. + More info: https://docs.crunchybridge.com/concepts/users type: string secretName: description: The name of the Secret that will hold the role @@ -131,11 +135,12 @@ spec: anyOf: - type: integer - type: string - description: The amount of storage available to the cluster in gigabytes. - The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) - to match Kubernetes conventions. If the amount is given in Gi, we - round to the nearest G value. The minimum value allowed by Bridge - is 10 GB. The maximum value allowed by Bridge is 65535 GB. 
+ description: |- + The amount of storage available to the cluster in gigabytes. + The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) to match Kubernetes conventions. + If the amount is given in Gi, we round to the nearest G value. + The minimum value allowed by Bridge is 10 GB. + The maximum value allowed by Bridge is 65535 GB. pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true required: @@ -145,6 +150,7 @@ spec: - plan - provider - region + - secret - storage type: object status: @@ -155,43 +161,35 @@ spec: description: conditions represent the observations of postgres cluster's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a foo's - current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -206,10 +204,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -233,13 +227,14 @@ spec: Bridge API and null until then. type: string isHa: - description: Whether the cluster is high availability, meaning that - it has a secondary it can fail over to quickly in case the primary - becomes unavailable. + description: |- + Whether the cluster is high availability, meaning that it has a secondary it can fail + over to quickly in case the primary becomes unavailable. type: boolean isProtected: - description: Whether the cluster is protected. Protected clusters - can't be destroyed until their protected flag is removed + description: |- + Whether the cluster is protected. Protected clusters can't be destroyed until + their protected flag is removed type: boolean majorVersion: description: The cluster's major Postgres version. diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index c0f184213a..da729cfaf2 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -1,12 +1,9 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null - labels: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest + controller-gen.kubebuilder.io/version: v0.16.4 name: pgadmins.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -23,14 +20,19 @@ spec: description: PGAdmin is the Schema for the PGAdmin API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -38,30 +40,29 @@ spec: description: PGAdminSpec defines the desired state of PGAdmin properties: affinity: - description: 'Scheduling constraints of the PGAdmin pod. More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the PGAdmin pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated with the @@ -71,75 +72,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. 
If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. @@ -150,116 +148,115 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term matches - no objects. 
The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -270,137 +267,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces list - and null namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. 
+ description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -408,146 +429,177 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied to the - union of the namespaces selected by this field and - the ones listed in the namespaces field. null selector - and null or empty namespaces list means "this pod's - namespace". An empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. 
type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules (e.g. @@ -555,16 +607,15 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -575,137 +626,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces list - and null namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -713,163 +788,202 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. 
If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied to the - union of the namespaces selected by this field and - the ones listed in the namespaces field. null selector - and null or empty namespaces list means "this pod's - namespace". An empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object config: - description: Configuration settings for the pgAdmin process. Changes - to any of these values will be loaded without validation. Be careful, - as you may put pgAdmin into an unusable state. + description: |- + Configuration settings for the pgAdmin process. Changes to any of these + values will be loaded without validation. Be careful, as + you may put pgAdmin into an unusable state. properties: configDatabaseURI: - description: 'A Secret containing the value for the CONFIG_DATABASE_URI - setting. 
More info: https://www.pgadmin.org/docs/pgadmin4/latest/external_database.html' + description: |- + A Secret containing the value for the CONFIG_DATABASE_URI setting. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/external_database.html properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret or its key must be @@ -878,29 +992,120 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic files: - description: Files allows the user to mount projected volumes - into the pgAdmin container so that files can be referenced by - pgAdmin as needed. + description: |- + Files allows the user to mount projected volumes into the pgAdmin + container so that files can be referenced by pgAdmin as needed. items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced ConfigMap will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If a - key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' - path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -909,22 +1114,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume - defaultMode will be used. This might be in conflict - with other options that affect the file mode, - like fsGroup, and the result can be other mode - bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. 
+ description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -932,14 +1135,22 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -952,8 +1163,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of the - pod: only annotations, labels, name and namespace - are supported.' + pod: only annotations, labels, name, namespace + and uid are supported.' properties: apiVersion: description: Version of the schema the FieldPath @@ -966,17 +1177,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to set - permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal - values, JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict with - other options that affect the file mode, like - fsGroup, and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -987,10 +1196,9 @@ spec: path must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required for @@ -1010,26 +1218,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced Secret will be - projected into the volume as a file whose name is - the key and content is the value. If specified, the - listed keys will be projected into the specified paths, - and unlisted keys will not be present. 
If a key is - specified which is not present in the Secret, the - volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' - path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -1038,22 +1246,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume - defaultMode will be used. This might be in conflict - with other options that affect the file mode, - like fsGroup, and the result can be other mode - bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -1061,39 +1267,47 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience of the - token. A recipient of a token must identify itself - with an identifier specified in the audience of the - token, and otherwise should reject the token. The - audience defaults to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. 
The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested duration - of validity of the service account token. As the token - approaches expiration, the kubelet volume plugin will - proactively rotate the service account token. The - kubelet will start trying to rotate the token if the - token is older than 80 percent of its time to live - or if the token is older than 24 hours.Defaults to - 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the mount - point of the file to project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -1101,19 +1315,28 @@ spec: type: object type: array gunicorn: - description: 'Settings for the gunicorn server. More info: https://docs.gunicorn.org/en/latest/settings.html' + description: |- + Settings for the gunicorn server. + More info: https://docs.gunicorn.org/en/latest/settings.html type: object x-kubernetes-preserve-unknown-fields: true ldapBindPassword: - description: 'A Secret containing the value for the LDAP_BIND_PASSWORD - setting. More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html' + description: |- + A Secret containing the value for the LDAP_BIND_PASSWORD setting. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret or its key must be @@ -1122,37 +1345,44 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic settings: - description: 'Settings for the pgAdmin server process. Keys should - be uppercase and values must be constants. More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html' + description: |- + Settings for the pgAdmin server process. Keys should be uppercase and + values must be constants. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html type: object x-kubernetes-preserve-unknown-fields: true type: object dataVolumeClaimSpec: - description: 'Defines a PersistentVolumeClaim for pgAdmin data. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + description: |- + Defines a PersistentVolumeClaim for pgAdmin data. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes properties: accessModes: - description: 'accessModes contains the desired access modes the - volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify either: + description: |- + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the provisioner - or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified - data source. If the AnyVolumeDataSource feature gate is enabled, - this field will always have the same contents as the DataSourceRef - field.' + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -1164,32 +1394,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from which to - populate the volume with data, if a non-empty volume is desired. - This may be any local object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator - or dynamic provisioner. This field will replace the functionality - of the DataSource field and as such if both fields are non-empty, - they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will be set to the - same value automatically if one of them is empty and the other - is non-empty. There are two important differences between DataSource - and DataSourceRef: * While DataSource only allows two specific - types of objects, DataSourceRef allows any non-core object, - as well as PersistentVolumeClaim objects. * While DataSource - ignores disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value is - specified. (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. 
+ When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -1197,16 +1433,23 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources the volume - should have. If RecoverVolumeExpansionFailure feature is enabled - users are allowed to specify resource requirements that are - lower than previous value but must still be higher than capacity - recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -1215,8 +1458,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1225,10 +1469,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: @@ -1239,51 +1484,69 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the StorageClass - required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume @@ -1294,25 +1557,36 @@ spec: description: The image name to use for pgAdmin instance. type: string imagePullPolicy: - description: 'ImagePullPolicy is used to determine when Kubernetes - will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy' + description: |- + ImagePullPolicy is used to determine when Kubernetes will attempt to + pull (download) container images. + More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy enum: - Always - Never - IfNotPresent type: string imagePullSecrets: - description: The image pull secrets used to pull from a private registry. + description: |- + The image pull secrets used to pull from a private registry. Changing this value causes all running PGAdmin pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string type: object + x-kubernetes-map-type: atomic type: array metadata: description: Metadata contains metadata for custom resources @@ -1327,12 +1601,39 @@ spec: type: object type: object priorityClassName: - description: 'Priority class name for the PGAdmin pod. Changing this - value causes PGAdmin pod to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the PGAdmin pod. Changing this + value causes PGAdmin pod to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string resources: description: Resource requirements for the PGAdmin container. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -1340,8 +1641,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1350,75 +1652,79 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object serverGroups: - description: ServerGroups for importing PostgresClusters to pgAdmin. - To create a pgAdmin with no selectors, leave this field empty. A - pgAdmin created with no `ServerGroups` will not automatically add - any servers through discovery. PostgresClusters can still be added - manually. 
+ description: |- + ServerGroups for importing PostgresClusters to pgAdmin. + To create a pgAdmin with no selectors, leave this field empty. + A pgAdmin created with no `ServerGroups` will not automatically + add any servers through discovery. PostgresClusters can still be + added manually. items: properties: name: - description: The name for the ServerGroup in pgAdmin. Must be - unique in the pgAdmin's ServerGroups since it becomes the - ServerGroup name in pgAdmin. + description: |- + The name for the ServerGroup in pgAdmin. + Must be unique in the pgAdmin's ServerGroups since it becomes the ServerGroup name in pgAdmin. type: string postgresClusterName: description: PostgresClusterName selects one cluster to add to pgAdmin by name. type: string postgresClusterSelector: - description: PostgresClusterSelector selects clusters to dynamically - add to pgAdmin by matching labels. An empty selector like - `{}` will select ALL clusters in the namespace. + description: |- + PostgresClusterSelector selects clusters to dynamically add to pgAdmin by matching labels. + An empty selector like `{}` will select ALL clusters in the namespace. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic required: - name type: object @@ -1428,55 +1734,58 @@ spec: rule: '[has(self.postgresClusterName),has(self.postgresClusterSelector)].exists_one(x,x)' type: array serviceName: - description: ServiceName will be used as the name of a ClusterIP service - pointing to the pgAdmin pod and port. If the service already exists, - PGO will update the service. For more information about services - reference the Kubernetes and CrunchyData documentation. https://kubernetes.io/docs/concepts/services-networking/service/ + description: |- + ServiceName will be used as the name of a ClusterIP service pointing + to the pgAdmin pod and port. If the service already exists, PGO will + update the service. For more information about services reference + the Kubernetes and CrunchyData documentation. + https://kubernetes.io/docs/concepts/services-networking/service/ type: string tolerations: - description: 'Tolerations of the PGAdmin pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the PGAdmin pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. 
format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array users: - description: pgAdmin users that are managed via the PGAdmin spec. - Users can still be added via the pgAdmin GUI, but those users will - not show up here. + description: |- + pgAdmin users that are managed via the PGAdmin spec. Users can still + be added via the pgAdmin GUI, but those users will not show up here. items: properties: passwordRef: @@ -1488,7 +1797,13 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret or its key must @@ -1497,17 +1812,19 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic role: - description: Role determines whether the user has admin privileges - or not. Defaults to User. Valid options are Administrator - and User. + description: |- + Role determines whether the user has admin privileges or not. + Defaults to User. Valid options are Administrator and User. enum: - Administrator - User type: string username: - description: The username for User in pgAdmin. Must be unique - in the pgAdmin's users list. + description: |- + The username for User in pgAdmin. + Must be unique in the pgAdmin's users list. type: string required: - passwordRef @@ -1524,46 +1841,39 @@ spec: description: PGAdminStatus defines the observed state of PGAdmin properties: conditions: - description: 'conditions represent the observations of pgAdmin''s - current state. Known .status.conditions.type is: "PersistentVolumeResizing"' + description: |- + conditions represent the observations of pgAdmin's current state. + Known .status.conditions.type is: "PersistentVolumeResizing" items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a foo's - current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. 
+ description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -1578,10 +1888,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml index 08d1472582..4ae831cfc7 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgupgrades.yaml @@ -1,12 +1,9 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null - labels: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest + controller-gen.kubebuilder.io/version: v0.16.4 name: pgupgrades.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -23,14 +20,19 @@ spec: description: PGUpgrade is the Schema for the pgupgrades API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -38,30 +40,29 @@ spec: description: PGUpgradeSpec defines the desired state of PGUpgrade properties: affinity: - description: 'Scheduling constraints of the PGUpgrade pod. More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the PGUpgrade pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated with the @@ -71,75 +72,72 @@ spec: description: A list of node selector requirements by node's labels. 
items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
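For reference between these two hunks: a minimal, hypothetical PGUpgrade manifest sketching how the nodeAffinity fields documented in this schema are typically populated. The resource name, cluster name, namespace label key, served API version, and PostgreSQL version numbers below are illustrative assumptions, not values taken from this diff; only the field layout follows the schema above.

apiVersion: postgres-operator.crunchydata.com/v1beta1  # assumed served version of this CRD
kind: PGUpgrade
metadata:
  name: example-upgrade              # hypothetical name
spec:
  postgresClusterName: hippo         # hypothetical target cluster
  fromPostgresVersion: 15            # within the minimum/maximum bounds set by this CRD
  toPostgresVersion: 16
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50                   # 1-100, per the schema
        preference:
          matchExpressions:
          - key: topology.kubernetes.io/zone   # illustrative label key
            operator: In             # one of In, NotIn, Exists, DoesNotExist, Gt, Lt
            values:
            - us-east-1a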
@@ -150,116 +148,115 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. 
type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -270,137 +267,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces list - and null namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -408,146 +429,177 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied to the - union of the namespaces selected by this field and - the ones listed in the namespaces field. null selector - and null or empty namespaces list means "this pod's - namespace". An empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules (e.g. @@ -555,16 +607,15 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -575,137 +626,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces field. - null selector and null or empty namespaces list - means "this pod's namespace". An empty selector - ({}) matches all namespaces. 
+ description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. The - term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces list - and null namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. 
Empty topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -713,176 +788,218 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied to the - union of the namespaces selected by this field and - the ones listed in the namespaces field. null selector - and null or empty namespaces list means "this pod's - namespace". An empty selector ({}) matches all namespaces. 
+ description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list of namespace - names that the term applies to. The term is applied - to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. null or - empty namespaces list and null namespaceSelector means - "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. 
+ description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object fromPostgresVersion: description: The major version of PostgreSQL before the upgrade. - maximum: 16 - minimum: 10 + maximum: 17 + minimum: 11 type: integer image: description: The image name to use for major PostgreSQL upgrades. type: string imagePullPolicy: - description: 'ImagePullPolicy is used to determine when Kubernetes - will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy' + description: |- + ImagePullPolicy is used to determine when Kubernetes will attempt to + pull (download) container images. + More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy enum: - Always - Never - IfNotPresent type: string imagePullSecrets: - description: The image pull secrets used to pull from a private registry. + description: |- + The image pull secrets used to pull from a private registry. Changing this value causes all running PGUpgrade pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string type: object + x-kubernetes-map-type: atomic type: array metadata: description: Metadata contains metadata for custom resources @@ -901,12 +1018,39 @@ spec: minLength: 1 type: string priorityClassName: - description: 'Priority class name for the PGUpgrade pod. Changing - this value causes PGUpgrade pod to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the PGUpgrade pod. Changing this + value causes PGUpgrade pod to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string resources: description: Resource requirements for the PGUpgrade container. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -914,8 +1058,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -924,59 +1069,61 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object toPostgresImage: - description: The image name to use for PostgreSQL containers after - upgrade. When omitted, the value comes from an operator environment - variable. + description: |- + The image name to use for PostgreSQL containers after upgrade. + When omitted, the value comes from an operator environment variable. type: string toPostgresVersion: description: The major version of PostgreSQL to be upgraded to. - maximum: 16 - minimum: 10 + maximum: 17 + minimum: 11 type: integer tolerations: - description: 'Tolerations of the PGUpgrade pod. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the PGUpgrade pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. 
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -992,43 +1139,35 @@ spec: description: conditions represent the observations of PGUpgrade's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - type FooStatus struct{ // Represents the observations of a foo's - current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
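For illustration only, and not part of the generated CRD diff: a minimal PGUpgrade manifest that stays within the updated version bounds above (minimum 11, maximum 17) and exercises the tolerations and resources fields. The API version, object names, target cluster, and node taint are assumptions for this sketch.

apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PGUpgrade
metadata:
  name: hippo-upgrade              # hypothetical name
spec:
  postgresClusterName: hippo       # assumed target cluster
  fromPostgresVersion: 16
  toPostgresVersion: 17
  imagePullPolicy: IfNotPresent
  resources:
    requests:
      cpu: 500m
      memory: 256Mi
  tolerations:
    - key: dedicated               # assumed node taint
      operator: Equal
      value: upgrade
      effect: NoSchedule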
maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -1043,10 +1182,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 3ab640bc08..6f9dd40f02 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -1,12 +1,9 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null - labels: - app.kubernetes.io/name: pgo - app.kubernetes.io/version: latest + controller-gen.kubebuilder.io/version: v0.16.4 name: postgresclusters.postgres-operator.crunchydata.com spec: group: postgres-operator.crunchydata.com @@ -23,14 +20,19 @@ spec: description: PostgresCluster is the Schema for the postgresclusters API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -44,30 +46,121 @@ spec: description: pgBackRest archive configuration properties: configuration: - description: 'Projected volumes containing custom pgBackRest - configuration. These files are mounted under "/etc/pgbackrest/conf.d" - alongside any pgBackRest configuration generated by the - PostgreSQL Operator: https://pgbackrest.org/configuration.html' + description: |- + Projected volumes containing custom pgBackRest configuration. These files are mounted + under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the + PostgreSQL Operator: + https://pgbackrest.org/configuration.html items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -76,39 +169,43 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON - requires decimal values for mode bits. If - not specified, the volume defaultMode will - be used. This might be in conflict with - other options that affect the file mode, - like fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the string - '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -123,8 +220,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' + the pod: only annotations, labels, name, + namespace and uid are supported.' properties: apiVersion: description: Version of the schema the @@ -138,17 +235,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to - set permissions on this file, must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -159,10 +254,9 @@ spec: the relative path must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
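As a hedged sketch, not part of the diff: the projected-volume schema above can mount custom pgBackRest settings under "/etc/pgbackrest/conf.d" via a ConfigMap projection. The spec.backups.pgbackrest path, ConfigMap name, and file names are assumptions for this example.

spec:
  backups:
    pgbackrest:
      configuration:
        - configMap:
            name: custom-pgbackrest    # assumed ConfigMap holding extra settings
            items:
              - key: custom.conf
                path: custom.conf      # projected under /etc/pgbackrest/conf.d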
properties: containerName: description: 'Container name: required @@ -183,27 +277,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -212,66 +305,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON - requires decimal values for mode bits. If - not specified, the volume defaultMode will - be used. This might be in conflict with - other options that affect the file mode, - like fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the string - '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the mount - point of the file to project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -281,48 +376,46 @@ spec: global: additionalProperties: type: string - description: 'Global pgBackRest configuration settings. These - settings are included in the "global" section of the pgBackRest - configuration generated by the PostgreSQL Operator, and - then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.html' + description: |- + Global pgBackRest configuration settings. These settings are included in the "global" + section of the pgBackRest configuration generated by the PostgreSQL Operator, and then + mounted under "/etc/pgbackrest/conf.d": + https://pgbackrest.org/configuration.html type: object image: - description: The image name to use for pgBackRest containers. Utilized - to run pgBackRest repository hosts and backups. The image - may also be set using the RELATED_IMAGE_PGBACKREST environment - variable + description: |- + The image name to use for pgBackRest containers. Utilized to run + pgBackRest repository hosts and backups. The image may also be set using + the RELATED_IMAGE_PGBACKREST environment variable type: string jobs: description: Jobs field allows configuration for all backup jobs properties: affinity: - description: 'Scheduling constraints of pgBackRest backup - Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of pgBackRest backup Job pods. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) - with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling - term matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -332,85 +425,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in @@ -422,112 +502,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to an update), the system - may or may not try to eventually evict the pod - from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector - term matches no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules @@ -535,20 +603,16 @@ spec: etc. as some other pod(s)). 
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most - preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -559,19 +623,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -579,66 +642,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. 
A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -646,76 +725,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. 
Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
format: int32 type: integer required: @@ -723,41 +787,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to a pod label update), - the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -765,60 +826,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -826,67 +909,59 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
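A hedged sketch, not part of the diff, of how the jobs.affinity schema above might be used to pin pgBackRest backup Job pods to labeled nodes; the spec.backups.pgbackrest.jobs path and the node label key/value are assumptions.

spec:
  backups:
    pgbackrest:
      jobs:
        affinity:
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:
                - matchExpressions:
                    - key: workload    # assumed node label
                      operator: In
                      values:
                        - backups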
type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling @@ -894,20 +969,16 @@ spec: zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity - expressions specified by this field, but it - may choose a node that violates one or more - of the expressions. The node that is most preferred - is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - anti-affinity expressions, etc.), compute a - sum by iterating through the elements of this - field and adding "weight" to the sum if the - node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest - sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -918,19 +989,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -938,66 +1008,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1005,76 +1091,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. 
+ description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -1082,41 +1153,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at scheduling - time, the pod will not be scheduled onto the - node. If the anti-affinity requirements specified - by this field cease to be met at some point - during pod execution (e.g. due to a pod label - update), the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1124,60 +1192,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1185,77 +1275,97 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object priorityClassName: - description: 'Priority class name for the pgBackRest backup - Job pods. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest backup Job pods. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string resources: - description: Resource limits for backup jobs. Includes - manual, scheduled and replica create backups + description: |- + Resource limits for backup jobs. Includes manual, scheduled and replica + create backups properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -1263,8 +1373,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1273,61 +1384,58 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tolerations: - description: 'Tolerations of pgBackRest backup Job pods. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of pgBackRest backup Job pods. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple - using the matching operator . 
+ description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to - match. Empty means match all taint effects. When - specified, allowed values are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints - of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect - NoExecute, otherwise this field is ignored) tolerates - the taint. By default, it is not set, which means - tolerate the taint forever (do not evict). Zero - and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array ttlSecondsAfterFinished: - description: 'Limit the lifetime of a Job that has finished. - More info: https://kubernetes.io/docs/concepts/workloads/controllers/job' + description: |- + Limit the lifetime of a Job that has finished. + More info: https://kubernetes.io/docs/concepts/workloads/controllers/job format: int32 minimum: 60 type: integer @@ -1337,8 +1445,9 @@ spec: Jobs properties: options: - description: Command line options to include when running - the pgBackRest backup command. https://pgbackrest.org/command.html#command-backup + description: |- + Command line options to include when running the pgBackRest backup command. + https://pgbackrest.org/command.html#command-backup items: type: string type: array @@ -1363,40 +1472,36 @@ spec: type: object type: object repoHost: - description: Defines configuration for a pgBackRest dedicated - repository host. This section is only applicable if at - least one "volume" (i.e. 
PVC-based) repository is defined - in the "repos" section, therefore enabling a dedicated repository - host Deployment. + description: |- + Defines configuration for a pgBackRest dedicated repository host. This section is only + applicable if at least one "volume" (i.e. PVC-based) repository is defined in the "repos" + section, therefore enabling a dedicated repository host Deployment. properties: affinity: - description: 'Scheduling constraints of the Dedicated - repo host pod. Changing this value causes repo host - to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the Dedicated repo host pod. + Changing this value causes repo host to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) - with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling - term matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -1406,85 +1511,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in @@ -1496,112 +1588,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to an update), the system - may or may not try to eventually evict the pod - from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector - term matches no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules @@ -1609,20 +1689,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most - preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -1633,19 +1709,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1653,66 +1728,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. 
This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. 
properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1720,76 +1811,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. 
+ description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -1797,41 +1873,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to a pod label update), - the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1839,60 +1912,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. 
+ description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -1900,67 +1995,59 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling @@ -1968,20 +2055,16 @@ spec: zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity - expressions specified by this field, but it - may choose a node that violates one or more - of the expressions. The node that is most preferred - is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - anti-affinity expressions, etc.), compute a - sum by iterating through the elements of this - field and adding "weight" to the sum if the - node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest - sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -1992,19 +2075,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -2012,66 +2094,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. 
+ description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -2079,76 +2177,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -2156,41 +2239,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at scheduling - time, the pod will not be scheduled onto the - node. If the anti-affinity requirements specified - by this field cease to be met at some point - during pod execution (e.g. due to a pod label - update), the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -2198,60 +2278,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -2259,78 +2361,97 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object priorityClassName: - description: 'Priority class name for the pgBackRest repo - host pod. Changing this value causes PostgreSQL to restart. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest repo host pod. Changing this value + causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string resources: description: Resource requirements for a pgBackRest repository host properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -2338,8 +2459,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2348,29 +2470,27 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object sshConfigMap: - description: 'ConfigMap containing custom SSH configuration. - Deprecated: Repository hosts use mTLS for encryption, - authentication, and authorization.' + description: |- + ConfigMap containing custom SSH configuration. + Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If - a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked - optional. Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -2379,22 +2499,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires decimal - values for mode bits. If not specified, the - volume defaultMode will be used. This might - be in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -2402,30 +2520,36 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic sshSecret: - description: 'Secret containing custom SSH keys. Deprecated: - Repository hosts use mTLS for encryption, authentication, - and authorization.' + description: |- + Secret containing custom SSH keys. + Deprecated: Repository hosts use mTLS for encryption, authentication, and authorization. properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If - a key is specified which is not present in the Secret, - the volume setup will error unless it is marked - optional. Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -2434,22 +2558,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires decimal - values for mode bits. If not specified, the - volume defaultMode will be used. This might - be in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -2457,208 +2579,236 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic tolerations: - description: 'Tolerations of a PgBackRest repo host pod. - Changing this value causes a restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a PgBackRest repo host pod. Changing this value causes a restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple - using the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to - match. Empty means match all taint effects. When - specified, allowed values are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints - of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect - NoExecute, otherwise this field is ignored) tolerates - the taint. By default, it is not set, which means - tolerate the taint forever (do not evict). Zero - and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. 
+ description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array topologySpreadConstraints: - description: 'Topology spread constraints of a Dedicated - repo host pod. Changing this value causes the repo host - to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/' + description: |- + Topology spread constraints of a Dedicated repo host pod. Changing this + value causes the repo host to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ items: description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching - pods. Pods that match this label selector are - counted to determine the number of pods in their - corresponding topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which - pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between - the number of matching pods in the target topology - and the global minimum. The global minimum is - the minimum number of matching pods in an eligible - domain or zero if the number of eligible domains - is less than MinDomains. For example, in a 3-zone - cluster, MaxSkew is set to 1, and pods with the - same labelSelector spread as 2/2/1: In this case, - the global minimum is 1. | zone1 | zone2 | zone3 - | | P P | P P | P | - if MaxSkew is 1, - incoming pod can only be scheduled to zone3 to - become 2/2/2; scheduling it onto zone1(zone2) - would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - if MaxSkew is 2, incoming - pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies - that satisfy it. It''s a required field. Default - value is 1 and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number - of eligible domains. When the number of eligible - domains with matching topology keys is less than - minDomains, Pod Topology Spread treats \"global - minimum\" as 0, and then the calculation of Skew - is performed. And when the number of eligible - domains with matching topology keys equals or - greater than minDomains, this value has no effect - on scheduling. As a result, when the number of - eligible domains is less than minDomains, scheduler - won't schedule more than maxSkew Pods to those - domains. If value is nil, the constraint behaves - as if MinDomains is equal to 1. Valid values are - integers greater than 0. 
When value is not nil, - WhenUnsatisfiable must be DoNotSchedule. \n For - example, in a 3-zone cluster, MaxSkew is set to - 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: | zone1 | zone2 - | zone3 | | P P | P P | P P | The number - of domains is less than 5(MinDomains), so \"global - minimum\" is treated as 0. In this situation, - new pod with the same labelSelector cannot be - scheduled, because computed skew will be 3(3 - - 0) if new Pod is scheduled to any of the three - zones, it will violate MaxSkew. \n This is an - alpha field and requires enabling MinDomainsInPodTopologySpread - feature gate." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. format: int32 type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string topologyKey: - description: TopologyKey is the key of node labels. - Nodes that have a label with this key and identical - values are considered to be in the same topology. - We consider each as a "bucket", and - try to put balanced number of pods into each bucket. - We define a domain as a particular instance of - a topology. Also, we define an eligible domain - as a domain whose nodes match the node selector. - e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. 
And, if - TopologyKey is "topology.kubernetes.io/zone", - each zone is a domain of that topology. It's a - required field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to - deal with a pod if it doesn''t satisfy the spread - constraint. - DoNotSchedule (default) tells the - scheduler not to schedule it. - ScheduleAnyway - tells the scheduler to schedule the pod in any - location, but giving higher precedence to topologies - that would help reduce the skew. A constraint - is considered "Unsatisfiable" for an incoming - pod if and only if every possible node assignment - for that pod would violate "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set - to 1, and pods with the same labelSelector spread - as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, - incoming pod can only be scheduled to zone2(zone3) - to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) - satisfies MaxSkew(1). In other words, the cluster - can still be imbalanced, but scheduler won''t - make it *more* imbalanced. It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. 
type: string required: - maxSkew @@ -2699,8 +2849,9 @@ spec: pattern: ^repo[1-4] type: string s3: - description: RepoS3 represents a pgBackRest repository - that is created using AWS S3 (or S3-compatible) storage + description: |- + RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) + storage properties: bucket: description: The S3 bucket utilized for the repository @@ -2719,26 +2870,30 @@ spec: - region type: object schedules: - description: 'Defines the schedules for the pgBackRest - backups Full, Differential and Incremental backup - types are supported: https://pgbackrest.org/user-guide.html#concept/backup' + description: |- + Defines the schedules for the pgBackRest backups + Full, Differential and Incremental backup types are supported: + https://pgbackrest.org/user-guide.html#concept/backup properties: differential: - description: 'Defines the Cron schedule for a differential - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for a differential pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string full: - description: 'Defines the Cron schedule for a full - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for a full pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string incremental: - description: 'Defines the Cron schedule for an incremental - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for an incremental pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string type: object @@ -2751,32 +2906,29 @@ spec: used to create and/or bind a volume properties: accessModes: - description: 'accessModes contains the desired - access modes the volume should have. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string - minItems: 1 type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to - specify either: * An existing VolumeSnapshot - object (snapshot.storage.k8s.io/VolumeSnapshot) + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller - can support the specified data source, it - will create a new volume based on the contents - of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always - have the same contents as the DataSourceRef - field.' 
+ If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup - is not specified, the specified Kind must - be in the core API group. For any other - third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource @@ -2790,40 +2942,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object - from which to populate the volume with data, - if a non-empty volume is desired. This may - be any local object from a non-empty API group - (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume - binding will only succeed if the type of the - specified object matches some installed volume - populator or dynamic provisioner. This field - will replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards - compatibility, both fields (DataSource and - DataSourceRef) will be set to the same value - automatically if one of them is empty and - the other is non-empty. There are two important - differences between DataSource and DataSourceRef: - * While DataSource only allows two specific - types of objects, DataSourceRef allows any - non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed - values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed - value is specified. (Beta) Using this field - requires the AnyVolumeDataSource feature gate - to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup - is not specified, the specified Kind must - be in the core API group. For any other - third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource @@ -2833,18 +2983,23 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum - resources the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify - resource requirements that are lower than - previous value but must still be higher than - capacity recorded in the status field of the - claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -2853,9 +3008,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum - amount of compute resources allowed. More - info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -2864,17 +3019,12 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. - If Requests is omitted for a container, - it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined - value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - required: - - storage + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object - required: - - requests type: object selector: description: selector is a label query over @@ -2885,68 +3035,82 @@ spec: of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of - the StorageClass required by the claim. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of - volume is required by the claim. Value of - Filesystem is implied when not included in - claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string - required: - - accessModes - - resources type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) + > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) required: - volumeClaimSpec type: object @@ -2963,32 +3127,29 @@ spec: using pgBackRest properties: affinity: - description: 'Scheduling constraints of the pgBackRest - restore Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) - with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling - term matches no objects (i.e. is also a no-op). 
+ description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -2998,85 +3159,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in @@ -3088,112 +3236,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to an update), the system - may or may not try to eventually evict the pod - from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector - term matches no objects. The requirements - of them are ANDed. The TopologySelectorTerm - type implements a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's - relationship to a set of values. - Valid operators are In, NotIn, - Exists, DoesNotExist. Gt, and - Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string - values. If the operator is In - or NotIn, the values array must - be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - If the operator is Gt or Lt, - the values array must have a - single element, which will be - interpreted as an integer. This - array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules @@ -3201,20 +3337,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a - node that violates one or more of the expressions. - The node that is most preferred is the one with - the greatest sum of weights, i.e. for each node - that meets all of the scheduling requirements - (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by - iterating through the elements of this field - and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most - preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -3225,19 +3357,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3245,66 +3376,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3312,76 +3459,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". 
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -3389,41 +3521,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, - the pod will not be scheduled onto the node. - If the affinity requirements specified by this - field cease to be met at some point during pod - execution (e.g. due to a pod label update), - the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. 
+ If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3431,60 +3560,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3492,67 +3643,59 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". 
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling @@ -3560,20 +3703,16 @@ spec: zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity - expressions specified by this field, but it - may choose a node that violates one or more - of the expressions. The node that is most preferred - is the one with the greatest sum of weights, - i.e. for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - anti-affinity expressions, etc.), compute a - sum by iterating through the elements of this - field and adding "weight" to the sum if the - node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest - sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node @@ -3584,19 +3723,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set - of resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3604,66 +3742,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the - set of namespaces that the term applies - to. The term is applied to the union - of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty - namespaces list means "this pod's - namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector - requirement is a selector that - contains values, a key, and - an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3671,76 +3825,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to - a set of values. Valid operators - are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an - array of string values. - If the operator is In or - NotIn, the values array - must be non-empty. If the - operator is Exists or DoesNotExist, - the values array must be - empty. This array is replaced - during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map - of {key,value} pairs. A single - {key,value} in the matchLabels - map is equivalent to an element - of matchExpressions, whose key - field is "key", the operator is - "In", and the values array contains - only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a - static list of namespace names that - the term applies to. The term is applied - to the union of the namespaces listed - in this field and the ones selected - by namespaceSelector. null or empty - namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where - co-located is defined as running on - a node whose value of the label with - key topologyKey matches that of any - node on which any of the selected - pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in - the range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -3748,41 +3887,38 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements - specified by this field are not met at scheduling - time, the pod will not be scheduled onto the - node. If the anti-affinity requirements specified - by this field cease to be met at some point - during pod execution (e.g. due to a pod label - update), the system may or may not try to eventually - evict the pod from its node. When there are - multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the - given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) - with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on - which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3790,60 +3926,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -3851,80 +4009,70 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object clusterName: - description: The name of an existing PostgresCluster to - use as the data source for the new PostgresCluster. - Defaults to the name of the PostgresCluster being created - if not provided. + description: |- + The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. + Defaults to the name of the PostgresCluster being created if not provided. type: string clusterNamespace: - description: The namespace of the cluster specified as - the data source using the clusterName field. Defaults - to the namespace of the PostgresCluster being created - if not provided. + description: |- + The namespace of the cluster specified as the data source using the clusterName field. + Defaults to the namespace of the PostgresCluster being created if not provided. type: string enabled: default: false @@ -3932,27 +4080,55 @@ spec: are enabled for this PostgresCluster. type: boolean options: - description: Command line options to include when running - the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore + description: |- + Command line options to include when running the pgBackRest restore command. + https://pgbackrest.org/command.html#command-restore items: type: string type: array priorityClassName: - description: 'Priority class name for the pgBackRest restore - Job pod. Changing this value causes PostgreSQL to restart. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest restore Job pod. Changing this + value causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string repoName: - description: The name of the pgBackRest repo within the - source PostgresCluster that contains the backups that - should be utilized to perform a pgBackRest restore when - initializing the data source for the new PostgresCluster. + description: |- + The name of the pgBackRest repo within the source PostgresCluster that contains the backups + that should be utilized to perform a pgBackRest restore when initializing the data source + for the new PostgresCluster. pattern: ^repo[1-4] type: string resources: description: Resource requirements for the pgBackRest restore Job. 
properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -3960,8 +4136,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -3970,55 +4147,51 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tolerations: - description: 'Tolerations of the pgBackRest restore Job. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to - tolerates any taint that matches the triple - using the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to - match. Empty means match all taint effects. When - specified, allowed values are NoSchedule, PreferNoSchedule - and NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; - this combination means to match all values and - all keys. + description: |- + Key is the taint key that the toleration applies to. 
Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints - of a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect - NoExecute, otherwise this field is ignored) tolerates - the taint. By default, it is not set, which means - tolerate the taint forever (do not evict). Zero - and negative values will be treated as 0 (evict - immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value - should be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -4036,6 +4209,32 @@ spec: resources: description: Resource requirements for a sidecar container properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4043,8 +4242,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -4053,12 +4253,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. 
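# A sketch of the scheduling fields on the restore Job shown above; the taint
# key/value and the resource figures are illustrative placeholders.
spec:
  backups:
    pgbackrest:
      restore:
        tolerations:
        - key: dedicated
          operator: Equal
          value: backups
          effect: NoSchedule
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
          limits:
            memory: 1Gi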
If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object @@ -4069,6 +4268,32 @@ spec: resources: description: Resource requirements for a sidecar container properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4076,8 +4301,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -4086,12 +4312,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
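# Sidecar containers take the same limits/requests shape; the sidecars path
# below is an assumption used only to illustrate where these fields sit.
spec:
  backups:
    pgbackrest:
      sidecars:
        pgbackrest:
          resources:
            requests:
              cpu: 100m
              memory: 256Mi
            limits:
              memory: 256Mi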
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object @@ -4099,8 +4324,17 @@ spec: required: - repos type: object - required: - - pgbackrest + snapshots: + description: VolumeSnapshot configuration + properties: + volumeSnapshotClassName: + description: Name of the VolumeSnapshotClass that should be + used by VolumeSnapshots + minLength: 1 + type: string + required: + - volumeSnapshotClassName + type: object type: object config: properties: @@ -4109,21 +4343,111 @@ spec: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. 
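# The new snapshots block selects which VolumeSnapshotClass the operator's
# VolumeSnapshots use; the class name is a placeholder for one installed in
# the cluster.
spec:
  backups:
    snapshots:
      volumeSnapshotClassName: csi-snapclass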
+ type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced ConfigMap will - be projected into the volume as a file whose name - is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If a - key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' - path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -4132,22 +4456,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume - defaultMode will be used. This might be in conflict - with other options that affect the file mode, - like fsGroup, and the result can be other mode - bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -4155,14 +4477,22 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -4175,8 +4505,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of the - pod: only annotations, labels, name and namespace - are supported.' + pod: only annotations, labels, name, namespace + and uid are supported.' properties: apiVersion: description: Version of the schema the FieldPath @@ -4189,17 +4519,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to set - permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and decimal - values, JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict with - other options that affect the file mode, like - fsGroup, and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -4210,10 +4538,9 @@ spec: path must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required for @@ -4233,26 +4560,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced Secret will be - projected into the volume as a file whose name is - the key and content is the value. If specified, the - listed keys will be projected into the specified paths, - and unlisted keys will not be present. If a key is - specified which is not present in the Secret, the - volume setup will error unless it is marked optional. - Paths must be relative and may not contain the '..' - path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
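# These volume projections are typically used to mount extra configuration
# files; the config.files field name and the ConfigMap name are assumptions
# for illustration.
spec:
  config:
    files:
    - configMap:
        name: extra-postgres-config
        items:
        - key: custom.conf
          path: custom.conf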
items: description: Maps a string key to a path within a volume. @@ -4261,22 +4588,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both octal - and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume - defaultMode will be used. This might be in conflict - with other options that affect the file mode, - like fsGroup, and the result can be other mode - bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -4284,39 +4609,47 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience of the - token. A recipient of a token must identify itself - with an identifier specified in the audience of the - token, and otherwise should reject the token. The - audience defaults to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested duration - of validity of the service account token. As the token - approaches expiration, the kubelet volume plugin will - proactively rotate the service account token. The - kubelet will start trying to rotate the token if the - token is older than 80 percent of its time to live - or if the token is older than 24 hours.Defaults to - 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the mount - point of the file to project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -4325,23 +4658,23 @@ spec: type: array type: object customReplicationTLSSecret: - description: 'The secret containing the replication client certificates - and keys for secure connections to the PostgreSQL server. It will - need to contain the client TLS certificate, TLS key and the Certificate - Authority certificate with the data keys set to tls.crt, tls.key - and ca.crt, respectively. NOTE: If CustomReplicationClientTLSSecret - is provided, CustomTLSSecret MUST be provided and the ca.crt provided - must be the same.' + description: |- + The secret containing the replication client certificates and keys for + secure connections to the PostgreSQL server. It will need to contain the + client TLS certificate, TLS key and the Certificate Authority certificate + with the data keys set to tls.crt, tls.key and ca.crt, respectively. + NOTE: If CustomReplicationClientTLSSecret is provided, CustomTLSSecret + MUST be provided and the ca.crt provided must be the same. properties: items: - description: items if unspecified, each key-value pair in the - Data field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the value. - If specified, the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If a key is specified - which is not present in the Secret, the volume setup will error - unless it is marked optional. Paths must be relative and may - not contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. properties: @@ -4349,54 +4682,64 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 and - 0777 or a decimal value between 0 and 511. YAML accepts - both octal and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume defaultMode - will be used. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the file to map - the key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic customTLSSecret: - description: 'The secret containing the Certificates and Keys to encrypt - PostgreSQL traffic will need to contain the server TLS certificate, - TLS key and the Certificate Authority certificate with the data - keys set to tls.crt, tls.key and ca.crt, respectively. It will then - be mounted as a volume projection to the ''/pgconf/tls'' directory. - For more information on Kubernetes secret projections, please see + description: |- + The secret containing the Certificates and Keys to encrypt PostgreSQL + traffic will need to contain the server TLS certificate, TLS key and the + Certificate Authority certificate with the data keys set to tls.crt, + tls.key and ca.crt, respectively. It will then be mounted as a volume + projection to the '/pgconf/tls' directory. For more information on + Kubernetes secret projections, please see https://k8s.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths NOTE: If CustomTLSSecret is provided, CustomReplicationClientTLSSecret - MUST be provided and the ca.crt provided must be the same.' + MUST be provided and the ca.crt provided must be the same. properties: items: - description: items if unspecified, each key-value pair in the - Data field of the referenced Secret will be projected into the - volume as a file whose name is the key and content is the value. - If specified, the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If a key is specified - which is not present in the Secret, the volume setup will error - unless it is marked optional. Paths must be relative and may - not contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. 
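# Per the descriptions above, the two custom TLS secrets must be provided
# together and share the same ca.crt; the Secret names are placeholders.
spec:
  customTLSSecret:
    name: hippo-cluster-cert
  customReplicationTLSSecret:
    name: hippo-replication-cert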
properties: @@ -4404,72 +4747,78 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 and - 0777 or a decimal value between 0 and 511. YAML accepts - both octal and decimal values, JSON requires decimal values - for mode bits. If not specified, the volume defaultMode - will be used. This might be in conflict with other options - that affect the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the file to map - the key to. May not be an absolute path. May not contain - the path element '..'. May not start with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic dataSource: description: Specifies a data source for bootstrapping the PostgreSQL cluster. properties: pgbackrest: - description: 'Defines a pgBackRest cloud-based data source that - can be used to pre-populate the PostgreSQL data directory for - a new PostgreSQL cluster using a pgBackRest restore. The PGBackRest - field is incompatible with the PostgresCluster field: only one - data source can be used for pre-populating a new PostgreSQL - cluster' + description: |- + Defines a pgBackRest cloud-based data source that can be used to pre-populate the + PostgreSQL data directory for a new PostgreSQL cluster using a pgBackRest restore. + The PGBackRest field is incompatible with the PostgresCluster field: only one + data source can be used for pre-populating a new PostgreSQL cluster properties: affinity: - description: 'Scheduling constraints of the pgBackRest restore - Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. 
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -4479,79 +4828,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -4563,105 +4905,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -4669,19 +5006,16 @@ spec: other pod(s)). 
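# A sketch of the nodeAffinity shape accepted by the pgBackRest restore Job of
# a cloud-based data source; kubernetes.io/arch is a well-known node label
# used here purely for illustration.
spec:
  dataSource:
    pgbackrest:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/arch
                operator: In
                values:
                - amd64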
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -4692,18 +5026,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -4711,60 +5045,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. 
A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -4772,70 +5128,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. 
Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
format: int32 type: integer required: @@ -4843,161 +5190,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -5005,19 +5370,16 @@ spec: etc. as some other pod(s)). 
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -5028,18 +5390,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -5047,60 +5409,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. 
A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -5108,70 +5492,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. 
Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
format: int32 type: integer required: @@ -5179,188 +5554,297 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object configuration: - description: 'Projected volumes containing custom pgBackRest - configuration. 
These files are mounted under "/etc/pgbackrest/conf.d" - alongside any pgBackRest configuration generated by the - PostgreSQL Operator: https://pgbackrest.org/configuration.html' + description: |- + Projected volumes containing custom pgBackRest configuration. These files are mounted + under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the + PostgreSQL Operator: + https://pgbackrest.org/configuration.html items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. 
+ type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the ConfigMap, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -5369,39 +5853,43 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON - requires decimal values for mode bits. If - not specified, the volume defaultMode will - be used. This might be in conflict with - other options that affect the file mode, - like fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the string - '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -5416,8 +5904,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of - the pod: only annotations, labels, name - and namespace are supported.' + the pod: only annotations, labels, name, + namespace and uid are supported.' properties: apiVersion: description: Version of the schema the @@ -5431,17 +5919,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used to - set permissions on this file, must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -5452,10 +5938,9 @@ spec: the relative path must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, requests.cpu and requests.memory) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required @@ -5476,27 +5961,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. - If a key is specified which is not present in - the Secret, the volume setup will error unless - it is marked optional. Paths must be relative - and may not contain the '..' path or start with - '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
items: description: Maps a string key to a path within a volume. @@ -5505,66 +5989,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. Must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON - requires decimal values for mode bits. If - not specified, the volume defaultMode will - be used. This might be in conflict with - other options that affect the file mode, - like fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of - the file to map the key to. May not be an - absolute path. May not contain the path - element '..'. May not start with the string - '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience of - the token. A recipient of a token must identify - itself with an identifier specified in the audience - of the token, and otherwise should reject the - token. The audience defaults to the identifier - of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account token. - As the token approaches expiration, the kubelet - volume plugin will proactively rotate the service - account token. The kubelet will start trying to - rotate the token if the token is older than 80 - percent of its time to live or if the token is - older than 24 hours.Defaults to 1 hour and must - be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the mount - point of the file to project the token into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -5574,21 +6060,24 @@ spec: global: additionalProperties: type: string - description: 'Global pgBackRest configuration settings. These - settings are included in the "global" section of the pgBackRest - configuration generated by the PostgreSQL Operator, and - then mounted under "/etc/pgbackrest/conf.d": https://pgbackrest.org/configuration.html' + description: |- + Global pgBackRest configuration settings. These settings are included in the "global" + section of the pgBackRest configuration generated by the PostgreSQL Operator, and then + mounted under "/etc/pgbackrest/conf.d": + https://pgbackrest.org/configuration.html type: object options: - description: Command line options to include when running - the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore + description: |- + Command line options to include when running the pgBackRest restore command. + https://pgbackrest.org/command.html#command-restore items: type: string type: array priorityClassName: - description: 'Priority class name for the pgBackRest restore - Job pod. Changing this value causes PostgreSQL to restart. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest restore Job pod. Changing this + value causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string repo: description: Defines a pgBackRest repository @@ -5619,8 +6108,9 @@ spec: pattern: ^repo[1-4] type: string s3: - description: RepoS3 represents a pgBackRest repository - that is created using AWS S3 (or S3-compatible) storage + description: |- + RepoS3 represents a pgBackRest repository that is created using AWS S3 (or S3-compatible) + storage properties: bucket: description: The S3 bucket utilized for the repository @@ -5638,26 +6128,30 @@ spec: - region type: object schedules: - description: 'Defines the schedules for the pgBackRest - backups Full, Differential and Incremental backup types - are supported: https://pgbackrest.org/user-guide.html#concept/backup' + description: |- + Defines the schedules for the pgBackRest backups + Full, Differential and Incremental backup types are supported: + https://pgbackrest.org/user-guide.html#concept/backup properties: differential: - description: 'Defines the Cron schedule for a differential - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for a differential pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string full: - description: 'Defines the Cron schedule for a full - pgBackRest backup. 
Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for a full pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string incremental: - description: 'Defines the Cron schedule for an incremental - pgBackRest backup. Follows the standard Cron schedule - syntax: https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax' + description: |- + Defines the Cron schedule for an incremental pgBackRest backup. + Follows the standard Cron schedule syntax: + https://k8s.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax minLength: 6 type: string type: object @@ -5670,30 +6164,29 @@ spec: used to create and/or bind a volume properties: accessModes: - description: 'accessModes contains the desired - access modes the volume should have. More info: - https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to - specify either: * An existing VolumeSnapshot - object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If - the provisioner or an external controller can - support the specified data source, it will create - a new volume based on the contents of the specified - data source. If the AnyVolumeDataSource feature - gate is enabled, this field will always have - the same contents as the DataSourceRef field.' + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup is - not specified, the specified Kind must be - in the core API group. For any other third-party - types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource @@ -5707,39 +6200,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object - from which to populate the volume with data, - if a non-empty volume is desired. This may be - any local object from a non-empty API group - (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume - binding will only succeed if the type of the - specified object matches some installed volume - populator or dynamic provisioner. 
This field - will replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards - compatibility, both fields (DataSource and DataSourceRef) - will be set to the same value automatically - if one of them is empty and the other is non-empty. - There are two important differences between - DataSource and DataSourceRef: * While DataSource - only allows two specific types of objects, DataSourceRef - allows any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed - values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed - value is specified. (Beta) Using this field - requires the AnyVolumeDataSource feature gate - to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup is - not specified, the specified Kind must be - in the core API group. For any other third-party - types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource @@ -5749,18 +6241,23 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string required: - kind - name type: object resources: - description: 'resources represents the minimum - resources the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify - resource requirements that are lower than previous - value but must still be higher than capacity - recorded in the status field of the claim. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -5769,9 +6266,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum - amount of compute resources allowed. More - info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -5780,12 +6277,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum - amount of compute resources required. If - Requests is omitted for a container, it - defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: @@ -5797,63 +6293,82 @@ spec: label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of - the StorageClass required by the claim. More - info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem - is implied when not included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) + > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) required: - volumeClaimSpec type: object @@ -5864,6 +6379,31 @@ spec: description: Resource requirements for the pgBackRest restore Job. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -5871,8 +6411,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -5881,59 +6422,57 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object stanza: default: db - description: The name of an existing pgBackRest stanza to - use as the data source for the new PostgresCluster. Defaults - to `db` if not provided. + description: |- + The name of an existing pgBackRest stanza to use as the data source for the new PostgresCluster. + Defaults to `db` if not provided. type: string tolerations: - description: 'Tolerations of the pgBackRest restore Job. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. 
If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -5942,38 +6481,36 @@ spec: - stanza type: object postgresCluster: - description: 'Defines a pgBackRest data source that can be used - to pre-populate the PostgreSQL data directory for a new PostgreSQL - cluster using a pgBackRest restore. The PGBackRest field is - incompatible with the PostgresCluster field: only one data source - can be used for pre-populating a new PostgreSQL cluster' + description: |- + Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data + directory for a new PostgreSQL cluster using a pgBackRest restore. + The PGBackRest field is incompatible with the PostgresCluster field: only one + data source can be used for pre-populating a new PostgreSQL cluster properties: affinity: - description: 'Scheduling constraints of the pgBackRest restore - Job. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. 
for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -5983,79 +6520,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -6067,105 +6597,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -6173,19 +6698,16 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. 
for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -6196,18 +6718,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -6215,60 +6737,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -6276,70 +6820,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. 
+ description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -6347,161 +6882,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. 
+ description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. 
If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -6509,19 +7062,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. 
for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -6532,18 +7082,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -6551,60 +7101,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -6612,70 +7184,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. 
If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -6683,196 +7246,240 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. 
due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. 
+ description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object clusterName: - description: The name of an existing PostgresCluster to use - as the data source for the new PostgresCluster. Defaults - to the name of the PostgresCluster being created if not - provided. + description: |- + The name of an existing PostgresCluster to use as the data source for the new PostgresCluster. + Defaults to the name of the PostgresCluster being created if not provided. 
type: string clusterNamespace: - description: The namespace of the cluster specified as the - data source using the clusterName field. Defaults to the - namespace of the PostgresCluster being created if not provided. + description: |- + The namespace of the cluster specified as the data source using the clusterName field. + Defaults to the namespace of the PostgresCluster being created if not provided. type: string options: - description: Command line options to include when running - the pgBackRest restore command. https://pgbackrest.org/command.html#command-restore + description: |- + Command line options to include when running the pgBackRest restore command. + https://pgbackrest.org/command.html#command-restore items: type: string type: array priorityClassName: - description: 'Priority class name for the pgBackRest restore - Job pod. Changing this value causes PostgreSQL to restart. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBackRest restore Job pod. Changing this + value causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string repoName: - description: The name of the pgBackRest repo within the source - PostgresCluster that contains the backups that should be - utilized to perform a pgBackRest restore when initializing - the data source for the new PostgresCluster. + description: |- + The name of the pgBackRest repo within the source PostgresCluster that contains the backups + that should be utilized to perform a pgBackRest restore when initializing the data source + for the new PostgresCluster. pattern: ^repo[1-4] type: string resources: description: Resource requirements for the pgBackRest restore Job. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -6880,8 +7487,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -6890,53 +7498,51 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. 
If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object tolerations: - description: 'Tolerations of the pgBackRest restore Job. More - info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of the pgBackRest restore Job. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple <key,value,effect> using - the matching operator <operator>. + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array @@ -6947,12 +7553,14 @@ spec: description: Defines any existing volumes to reuse for this PostgresCluster. properties: pgBackRestVolume: - description: Defines the existing pgBackRest repo volume and - directory to use in the current PostgresCluster. + description: |- + Defines the existing pgBackRest repo volume and directory to use in the + current PostgresCluster. properties: directory: - description: The existing directory. When not set, a move - Job is not created for the associated volume. + description: |- + The existing directory. When not set, a move Job is not created for the + associated volume. type: string pvcName: description: The existing PVC name. @@ -6961,12 +7569,14 @@ spec: - pvcName type: object pgDataVolume: - description: Defines the existing pgData volume and directory - to use in the current PostgresCluster. + description: |- + Defines the existing pgData volume and directory to use in the current + PostgresCluster. properties: directory: - description: The existing directory. When not set, a move - Job is not created for the associated volume. + description: |- + The existing directory. When not set, a move Job is not created for the + associated volume. type: string pvcName: description: The existing PVC name. @@ -6975,13 +7585,15 @@ spec: - pvcName type: object pgWALVolume: - description: Defines the existing pg_wal volume and directory - to use in the current PostgresCluster. Note that a defined - pg_wal volume MUST be accompanied by a pgData volume. + description: |- + Defines the existing pg_wal volume and directory to use in the current + PostgresCluster. Note that a defined pg_wal volume MUST be accompanied by + a pgData volume. properties: directory: - description: The existing directory. When not set, a move - Job is not created for the associated volume. + description: |- + The existing directory. When not set, a move Job is not created for the + associated volume. type: string pvcName: description: The existing PVC name. @@ -6992,9 +7604,10 @@ spec: type: object type: object databaseInitSQL: - description: DatabaseInitSQL defines a ConfigMap containing custom - SQL that will be run after the cluster is initialized. This ConfigMap - must be in the same namespace as the cluster. + description: |- + DatabaseInitSQL defines a ConfigMap containing custom SQL that will + be run after the cluster is initialized. This ConfigMap must be in the same + namespace as the cluster. properties: key: description: Key is the ConfigMap data key that points to a SQL @@ -7008,69 +7621,84 @@ spec: - name type: object disableDefaultPodScheduling: - description: Whether or not the PostgreSQL cluster should use the - defined default scheduling constraints. If the field is unset or - false, the default scheduling constraints will be used in addition - to any custom constraints provided. + description: |- + Whether or not the PostgreSQL cluster should use the defined default + scheduling constraints. If the field is unset or false, the default + scheduling constraints will be used in addition to any custom constraints + provided. type: boolean image: - description: The image name to use for PostgreSQL containers. When - omitted, the value comes from an operator environment variable. - For standard PostgreSQL images, the format is RELATED_IMAGE_POSTGRES_{postgresVersion}, + description: |- + The image name to use for PostgreSQL containers. 
When omitted, the value + comes from an operator environment variable. For standard PostgreSQL images, + the format is RELATED_IMAGE_POSTGRES_{postgresVersion}, e.g. RELATED_IMAGE_POSTGRES_13. For PostGIS enabled PostgreSQL images, the format is RELATED_IMAGE_POSTGRES_{postgresVersion}_GIS_{postGISVersion}, e.g. RELATED_IMAGE_POSTGRES_13_GIS_3.1. type: string imagePullPolicy: - description: 'ImagePullPolicy is used to determine when Kubernetes - will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy' + description: |- + ImagePullPolicy is used to determine when Kubernetes will attempt to + pull (download) container images. + More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy enum: - Always - Never - IfNotPresent type: string imagePullSecrets: - description: The image pull secrets used to pull from a private registry - Changing this value causes all running pods to restart. https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + description: |- + The image pull secrets used to pull from a private registry + Changing this value causes all running pods to restart. + https://k8s.io/docs/tasks/configure-pod-container/pull-image-private-registry/ items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string type: object + x-kubernetes-map-type: atomic type: array instances: - description: Specifies one or more sets of PostgreSQL pods that replicate - data for this cluster. + description: |- + Specifies one or more sets of PostgreSQL pods that replicate data for + this cluster. items: properties: affinity: - description: 'Scheduling constraints of a PostgreSQL pod. Changing - this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of a PostgreSQL pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods - to nodes that satisfy the affinity expressions specified - by this field, but it may choose a node that violates - one or more of the expressions. The node that is most - preferred is the one with the greatest sum of weights, - i.e. 
for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating - through the elements of this field and adding "weight" - to the sum if the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a - no-op). A null preferred scheduling term matches - no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -7080,79 +7708,72 @@ spec: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the range @@ -7164,105 +7785,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the affinity - requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an - update), the system may or may not try to eventually - evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
type: string values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, the - values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. If - the operator is Gt or Lt, the values - array must have a single element, - which will be interpreted as an integer. - This array is replaced during a strategic - merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -7270,18 +7886,16 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods - to nodes that satisfy the affinity expressions specified - by this field, but it may choose a node that violates - one or more of the expressions. The node that is most - preferred is the one with the greatest sum of weights, - i.e. 
for each node that meets all of the scheduling - requirements (resource request, requiredDuringScheduling - affinity expressions, etc.), compute a sum by iterating - through the elements of this field and adding "weight" - to the sum if the node has pods which matches the - corresponding podAffinityTerm; the node(s) with the - highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -7292,144 +7906,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of - namespaces that the term applies to. The - term is applied to the union of the namespaces - selected by this field and the ones listed - in the namespaces field. null selector and - null or empty namespaces list means "this - pod's namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term applies - to. The term is applied to the union of - the namespaces listed in this field and - the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose value - of the label with key topologyKey matches - that of any node on which any of the selected - pods is running. Empty topologyKey is not - allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the - corresponding podAffinityTerm, in the range - 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -7437,158 +8068,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the affinity - requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a - pod label update), the system may or may not try to - eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all - terms must be satisfied. 
+ description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or - not co-located (anti-affinity) with, where co-located - is defined as running on a node whose value of the - label with key <topologyKey> matches that of any - node on which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value".
The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified - namespaces, where co-located is defined as running - on a node whose value of the label with key - topologyKey matches that of any node on which - any of the selected pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -7596,18 +8248,16 @@ spec: as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods - to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the greatest - sum of weights, i.e. for each node that meets all - of the scheduling requirements (resource request, - requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if the - node has pods which matches the corresponding podAffinityTerm; - the node(s) with the highest sum are the most preferred. 
+ description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -7618,144 +8268,161 @@ spec: with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of - namespaces that the term applies to. The - term is applied to the union of the namespaces - selected by this field and the ones listed - in the namespaces field. null selector and - null or empty namespaces list means "this - pod's namespace". An empty selector ({}) - matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term applies - to. The term is applied to the union of - the namespaces listed in this field and - the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector - means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose value - of the label with key topologyKey matches - that of any node on which any of the selected - pods is running. Empty topologyKey is not - allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching the - corresponding podAffinityTerm, in the range - 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -7763,199 +8430,221 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the anti-affinity - requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a - pod label update), the system may or may not try to - eventually evict the pod from its node. When there - are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all - terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or - not co-located (anti-affinity) with, where co-located - is defined as running on a node whose value of the - label with key <topologyKey> matches that of any - node on which a pod of the set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by this - field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, a - key, and an operator that relates the - key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected by - namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified - namespaces, where co-located is defined as running - on a node whose value of the label with key - topologyKey matches that of any node on which - any of the selected pods is running. Empty topologyKey - is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object containers: - description: Custom sidecars for PostgreSQL instance pods. Changing - this value causes PostgreSQL to restart. + description: |- + Custom sidecars for PostgreSQL instance pods. Changing this value causes + PostgreSQL to restart. items: description: A single application container that you want to run within a pod. properties: args: - description: 'Arguments to the entrypoint. The container - image''s CMD is used if this is not provided. Variable - references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the reference - in the input string will be unchanged. Double $$ are - reduced to a single $, which allows for escaping the - $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce - the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the variable - exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. 
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array + x-kubernetes-list-type: atomic command: - description: 'Entrypoint array. Not executed within a - shell. The container image''s ENTRYPOINT is used if - this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If - a variable cannot be resolved, the reference in the - input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) - syntax: i.e. "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists - or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array + x-kubernetes-list-type: atomic env: - description: List of environment variables to set in the - container. Cannot be updated. + description: |- + List of environment variables to set in the container. + Cannot be updated. items: description: EnvVar represents an environment variable present in a Container. @@ -7965,17 +8654,16 @@ spec: be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are - expanded using the previously defined environment - variables in the container and any service environment - variables. If a variable cannot be resolved, the - reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" - will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults - to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. 
+ Defaults to "". type: string valueFrom: description: Source for the environment variable's @@ -7988,8 +8676,13 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the ConfigMap @@ -7998,12 +8691,11 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports - metadata.name, metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath @@ -8016,12 +8708,11 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, requests.cpu, - requests.memory and requests.ephemeral-storage) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for @@ -8042,6 +8733,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -8051,8 +8743,13 @@ spec: from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret @@ -8061,19 +8758,22 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: - description: List of sources to populate environment variables - in the container. The keys defined within a source must - be a C_IDENTIFIER. All invalid keys will be reported - as an event when the container is starting. When a key - exists in multiple sources, the value associated with - the last source will take precedence. Values defined - by an Env with a duplicate key will take precedence. + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. 
All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. Cannot be updated. items: description: EnvFromSource represents the source of @@ -8083,14 +8783,20 @@ spec: description: The ConfigMap to select from properties: name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the ConfigMap must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -8099,65 +8805,72 @@ spec: description: The Secret to select from properties: name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config - management to default or override container images in - workload controllers like Deployments and StatefulSets.' + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. type: string imagePullPolicy: - description: 'Image pull policy. One of Always, Never, - IfNotPresent. Defaults to Always if :latest tag is specified, - or IfNotPresent otherwise. Cannot be updated. More info: - https://kubernetes.io/docs/concepts/containers/images#updating-images' + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images type: string lifecycle: - description: Actions that the management system should - take in response to container lifecycle events. Cannot - be updated. + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. properties: postStart: - description: 'PostStart is called immediately after - a container is created. If the handler fails, the - container is terminated and restarted according - to its restart policy. Other management of the container - blocks until the hook completes. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside a - shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you - need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -8168,7 +8881,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -8178,6 +8893,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8185,24 +8901,36 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to - access on the container. Number must be - in the range 1 to 65535. Name must be an - IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward - compatibility. There are no validation of this - field and lifecycle hooks will fail in runtime - when tcp handler is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. 
There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to connect @@ -8212,55 +8940,49 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to - access on the container. Number must be - in the range 1 to 65535. Name must be an - IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object type: object preStop: - description: 'PreStop is called immediately before - a container is terminated due to an API request - or management event such as liveness/startup probe - failure, preemption, resource contention, etc. The - handler is not called if the container crashes or - exits. The Pod''s termination grace period countdown - begins before the PreStop hook is executed. Regardless - of the outcome of the handler, the container will - eventually terminate within the Pod''s termination - grace period (unless delayed by finalizers). Other - management of the container blocks until the hook - completes or until the termination grace period - is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside a - shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, you - need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
type: string httpHeaders: @@ -8271,7 +8993,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -8281,6 +9005,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8288,24 +9013,36 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to - access on the container. Number must be - in the range 1 to 65535. Name must be an - IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward - compatibility. There are no validation of this - field and lifecycle hooks will fail in runtime - when tcp handler is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to connect @@ -8315,10 +9052,10 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to - access on the container. Number must be - in the range 1 to 65535. Name must be an - IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port @@ -8326,37 +9063,36 @@ spec: type: object type: object livenessProbe: - description: 'Periodic probe of container liveness. Container - will be restarted if the probe fails. Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory - for the command is root ('/') in the container's - filesystem. The command is simply exec'd, it - is not run inside a shell, so traditional shell - instructions ('|', etc) won't work. To use a - shell, you need to explicitly call out to that - shell. Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving a - GRPC port. This is a beta field and requires enabling - GRPCContainerProbe feature gate. + GRPC port. properties: port: description: Port number of the gRPC service. @@ -8364,11 +9100,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -8378,9 +9115,9 @@ spec: perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -8390,7 +9127,9 @@ spec: to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -8400,6 +9139,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8407,33 +9147,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is 1. 
+ description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. - Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -8448,60 +9190,59 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides the - value provided by the pod spec. Value must be non-negative - integer. The value zero indicates stop immediately - via the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe - times out. Defaults to 1 second. Minimum value is - 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object name: - description: Name of the container specified as a DNS_LABEL. + description: |- + Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. type: string ports: - description: List of ports to expose from the container. 
- Exposing a port here gives the system additional information - about the network connections a container uses, but - is primarily informational. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port - which is listening on the default "0.0.0.0" address - inside a container will be accessible from the network. + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. items: description: ContainerPort represents a network port in a single container. properties: containerPort: - description: Number of port to expose on the pod's - IP address. This must be a valid port number, - 0 < x < 65536. + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer hostIP: @@ -8509,23 +9250,24 @@ spec: to. type: string hostPort: - description: Number of port to expose on the host. - If specified, this must be a valid port number, - 0 < x < 65536. If HostNetwork is specified, this - must match ContainerPort. Most containers do not - need this. + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. format: int32 type: integer name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in - a pod must have a unique name. Name for the port - that can be referred to by services. + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. type: string protocol: default: TCP - description: Protocol for port. Must be UDP, TCP, - or SCTP. Defaults to "TCP". + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string required: - containerPort @@ -8536,37 +9278,36 @@ spec: - protocol x-kubernetes-list-type: map readinessProbe: - description: 'Periodic probe of container service readiness. - Container will be removed from service endpoints if - the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory - for the command is root ('/') in the container's - filesystem. The command is simply exec'd, it - is not run inside a shell, so traditional shell - instructions ('|', etc) won't work. To use a - shell, you need to explicitly call out to that - shell. Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. 
+ description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving a - GRPC port. This is a beta field and requires enabling - GRPCContainerProbe feature gate. + GRPC port. properties: port: description: Port number of the gRPC service. @@ -8574,11 +9315,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -8588,9 +9330,9 @@ spec: perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -8600,7 +9342,9 @@ spec: to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -8610,6 +9354,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -8617,33 +9362,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. - Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -8658,42 +9405,90 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides the - value provided by the pod spec. Value must be non-negative - integer. The value zero indicates stop immediately - via the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe - times out. Defaults to 1 second. Minimum value is - 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. 
+ items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: - description: 'Compute Resources required by this container. - Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -8701,8 +9496,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -8711,33 +9507,76 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. 
+ Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string securityContext: - description: 'SecurityContext defines the security options - the container should be run with. If set, the fields - of SecurityContext override the equivalent fields of - PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls whether - a process can gain more privileges than its parent - process. This bool directly controls if the no_new_privs - flag will be set on the container process. AllowPrivilegeEscalation - is true always when the container is: 1) run as - Privileged 2) has CAP_SYS_ADMIN Note that this field - cannot be set when spec.os.name is windows.' + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: - description: The capabilities to add/drop when running - containers. Defaults to the default set of capabilities - granted by the container runtime. Note that this - field cannot be set when spec.os.name is windows. + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
properties: add: description: Added capabilities @@ -8746,6 +9585,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -8753,65 +9593,63 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: - description: Run container in privileged mode. Processes - in privileged containers are essentially equivalent - to root on the host. Defaults to false. Note that - this field cannot be set when spec.os.name is windows. + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. type: boolean procMount: - description: procMount denotes the type of proc mount - to use for the containers. The default is DefaultProcMount - which uses the container runtime defaults for readonly - paths and masked paths. This requires the ProcMountType - feature flag to be enabled. Note that this field - cannot be set when spec.os.name is windows. + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: Whether this container has a read-only - root filesystem. Default is false. Note that this - field cannot be set when spec.os.name is windows. + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. type: boolean runAsGroup: - description: The GID to run the entrypoint of the - container process. Uses runtime default if unset. - May also be set in PodSecurityContext. If set in - both SecurityContext and PodSecurityContext, the - value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must run - as a non-root user. If true, the Kubelet will validate - the image at runtime to ensure that it does not - run as UID 0 (root) and fail to start the container - if it does. If unset or false, no such validation - will be performed. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of the - container process. 
Defaults to user specified in - image metadata if unspecified. May also be set in - PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in SecurityContext - takes precedence. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied to - the container. If unspecified, the container runtime - will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that @@ -8831,113 +9669,98 @@ spec: type: string type: object seccompProfile: - description: The seccomp options to use by this container. - If seccomp options are provided at both the pod - & container level, the container options override - the pod options. Note that this field cannot be - set when spec.os.name is windows. + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: localhostProfile indicates a profile - defined in a file on the node should be used. - The profile must be preconfigured on the node - to work. Must be a descending path, relative - to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string type: - description: 'type indicates which kind of seccomp - profile will be applied. Valid options are: - Localhost - a profile defined in a file on the - node should be used. RuntimeDefault - the container - runtime default profile should be used. Unconfined - - no profile should be applied.' + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string required: - type type: object windowsOptions: - description: The Windows specific settings applied - to all containers. 
If unspecified, the options from - the PodSecurityContext will be used. If set in both - SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence. Note - that this field cannot be set when spec.os.name - is linux. + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where the GMSA - admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential - spec named by the GMSACredentialSpecName field. + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: description: GMSACredentialSpecName is the name of the GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a container - should be run as a 'Host Process' container. - This field is alpha-level and will only be honored - by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, - if HostProcess is true then HostNetwork must - also be set to true. + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: The UserName in Windows to run the - entrypoint of the container process. Defaults - to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set - in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string type: object type: object startupProbe: - description: 'StartupProbe indicates that the Pod has - successfully initialized. If specified, no other probes - are executed until this completes successfully. If this - probe fails, the Pod will be restarted, just as if the - livenessProbe failed. This can be used to provide different - probe parameters at the beginning of a Pod''s lifecycle, - when it might take a long time to load data or warm - a cache, than during steady-state operation. This cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + StartupProbe indicates that the Pod has successfully initialized. 
+ If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to execute - inside the container, the working directory - for the command is root ('/') in the container's - filesystem. The command is simply exec'd, it - is not run inside a shell, so traditional shell - instructions ('|', etc) won't work. To use a - shell, you need to explicitly call out to that - shell. Exit status of 0 is treated as live/healthy - and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving a - GRPC port. This is a beta field and requires enabling - GRPCContainerProbe feature gate. + GRPC port. properties: port: description: Port number of the gRPC service. @@ -8945,11 +9768,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -8959,9 +9783,9 @@ spec: perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -8971,7 +9795,9 @@ spec: to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -8981,6 +9807,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. 
type: string @@ -8988,33 +9815,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to the - host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. - Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -9029,81 +9858,76 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides the - value provided by the pod spec. Value must be non-negative - integer. The value zero indicates stop immediately - via the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod - feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. 
+ Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the probe - times out. Defaults to 1 second. Minimum value is - 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object stdin: - description: Whether this container should allocate a - buffer for stdin in the container runtime. If this is - not set, reads from stdin in the container will always - result in EOF. Default is false. + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. type: boolean stdinOnce: - description: Whether the container runtime should close - the stdin channel after it has been opened by a single - attach. When stdin is true the stdin stream will remain - open across multiple attach sessions. If stdinOnce is - set to true, stdin is opened on container start, is - empty until the first client attaches to stdin, and - then remains open and accepts data until the client - disconnects, at which time stdin is closed and remains - closed until the container is restarted. If this flag - is false, a container processes that reads from stdin - will never receive an EOF. Default is false + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false type: boolean terminationMessagePath: - description: 'Optional: Path at which the file to which - the container''s termination message will be written - is mounted into the container''s filesystem. Message - written is intended to be brief final status, such as - an assertion failure message. Will be truncated by the - node if greater than 4096 bytes. The total message length - across all containers will be limited to 12kb. Defaults - to /dev/termination-log. Cannot be updated.' + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. type: string terminationMessagePolicy: - description: Indicate how the termination message should - be populated. 
File will use the contents of terminationMessagePath - to populate the container status message on both success - and failure. FallbackToLogsOnError will use the last - chunk of container log output if the termination message - file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, - whichever is smaller. Defaults to File. Cannot be updated. + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. type: string tty: - description: Whether this container should allocate a - TTY for itself, also requires 'stdin' to be true. Default - is false. + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. type: boolean volumeDevices: description: volumeDevices is the list of block devices @@ -9125,86 +9949,118 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: - description: Pod volumes to mount into the container's - filesystem. Cannot be updated. + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. items: description: VolumeMount describes a mounting of a Volume within a container. properties: mountPath: - description: Path within the container at which - the volume should be mounted. Must not contain - ':'. + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. type: string mountPropagation: - description: mountPropagation determines how mounts - are propagated from the host to container and - the other way around. When not set, MountPropagationNone - is used. This field is beta in 1.10. + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name of a Volume. type: string readOnly: - description: Mounted read-only if true, read-write - otherwise (false or unspecified). Defaults to - false. + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. 
+ + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: - description: Path within the volume from which the - container's volume should be mounted. Defaults - to "" (volume's root). + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). type: string subPathExpr: - description: Expanded path within the volume from - which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable - references $(VAR_NAME) are expanded using the - container's environment. Defaults to "" (volume's - root). SubPathExpr and SubPath are mutually exclusive. + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. type: string required: - mountPath - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which - might be configured in the container image. Cannot be - updated. + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. type: string required: - name type: object type: array dataVolumeClaimSpec: - description: 'Defines a PersistentVolumeClaim for PostgreSQL - data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + description: |- + Defines a PersistentVolumeClaim for PostgreSQL data. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes properties: accessModes: - description: 'accessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string - minItems: 1 type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify either: + description: |- + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the provisioner - or an external controller can support the specified data - source, it will create a new volume based on the contents - of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have the - same contents as the DataSourceRef field.' + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -9216,34 +10072,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from which - to populate the volume with data, if a non-empty volume - is desired. This may be any local object from a non-empty - API group (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume binding will - only succeed if the type of the specified object matches - some installed volume populator or dynamic provisioner. - This field will replace the functionality of the DataSource - field and as such if both fields are non-empty, they must - have the same value. For backwards compatibility, both - fields (DataSource and DataSourceRef) will be set to the - same value automatically if one of them is empty and the - other is non-empty. There are two important differences - between DataSource and DataSourceRef: * While DataSource - only allows two specific types of objects, DataSourceRef - allows any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed values - (dropping them), DataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -9251,17 +10111,23 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify resource - requirements that are lower than previous value but must - still be higher than capacity recorded in the status field - of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -9270,8 +10136,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9280,16 +10147,12 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - required: - - storage + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object - required: - - requests type: object selector: description: selector is a label query over volumes to consider @@ -9299,8 +10162,8 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -9308,52 +10171,72 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the StorageClass - required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string - required: - - accessModes - - resources type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) metadata: description: Metadata contains metadata for custom resources properties: @@ -9370,22 +10253,24 @@ spec: anyOf: - type: integer - type: string - description: Minimum number of pods that should be available - at a time. Defaults to one when the replicas field is greater - than one. + description: |- + Minimum number of pods that should be available at a time. + Defaults to one when the replicas field is greater than one. x-kubernetes-int-or-string: true name: default: "" - description: Name that associates this set of PostgreSQL pods. - This field is optional when only one instance set is defined. - Each instance set in a cluster must have a unique name. The - combined length of this and the cluster name must be 46 characters - or less. + description: |- + Name that associates this set of PostgreSQL pods. This field is optional + when only one instance set is defined. Each instance set in a cluster + must have a unique name. The combined length of this and the cluster name + must be 46 characters or less. pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ type: string priorityClassName: - description: 'Priority class name for the PostgreSQL pod. Changing - this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the PostgreSQL pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string replicas: default: 1 @@ -9396,6 +10281,31 @@ spec: resources: description: Compute resources of a PostgreSQL container. properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -9403,8 +10313,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9413,11 +10324,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object sidecars: @@ -9430,6 +10341,32 @@ spec: resources: description: Resource requirements for a sidecar container properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -9437,8 +10374,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9447,48 +10385,50 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. 
If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object type: object tablespaceVolumes: - description: The list of tablespaces volumes to mount for this - postgrescluster This field requires enabling TablespaceVolumes - feature gate + description: |- + The list of tablespaces volumes to mount for this postgrescluster + This field requires enabling TablespaceVolumes feature gate items: properties: dataVolumeClaimSpec: - description: 'Defines a PersistentVolumeClaim for a tablespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + description: |- + Defines a PersistentVolumeClaim for a tablespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes properties: accessModes: - description: 'accessModes contains the desired access - modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify - either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the - provisioner or an external controller can support - the specified data source, it will create a new - volume based on the contents of the specified data - source. If the AnyVolumeDataSource feature gate - is enabled, this field will always have the same - contents as the DataSourceRef field.' + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. - For any other third-party types, APIGroup is - required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
type: string kind: description: Kind is the type of resource being @@ -9502,37 +10442,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from - which to populate the volume with data, if a non-empty - volume is desired. This may be any local object - from a non-empty API group (non core object) or - a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed if - the type of the specified object matches some installed - volume populator or dynamic provisioner. This field - will replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will - be set to the same value automatically if one of - them is empty and the other is non-empty. There - are two important differences between DataSource - and DataSourceRef: * While DataSource only allows - two specific types of objects, DataSourceRef allows - any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed values - (dropping them), DataSourceRef preserves all values, - and generates an error if a disallowed value is - specified. (Beta) Using this field requires the - AnyVolumeDataSource feature gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. - For any other third-party types, APIGroup is - required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
type: string kind: description: Kind is the type of resource being @@ -9542,17 +10483,23 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify - resource requirements that are lower than previous - value but must still be higher than capacity recorded - in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -9561,8 +10508,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9571,12 +10519,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: @@ -9588,63 +10535,86 @@ spec: selector requirements. The requirements are ANDed. items: - description: A label selector requirement is - a selector that contains values, a key, and - an operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. 
type: string operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If - the operator is Exists or DoesNotExist, - the values array must be empty. This array - is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". - The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the - StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume - is required by the claim. Value of Filesystem is - implied when not included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. 
type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) + > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) name: - description: The name for the tablespace, used as the - path name for the volume. Must be unique in the instance - set since they become the directory names. + description: |- + The name for the tablespace, used as the path name for the volume. + Must be unique in the instance set since they become the directory names. minLength: 1 pattern: ^[a-z][a-z0-9]*$ type: string @@ -9657,67 +10627,67 @@ spec: - name x-kubernetes-list-type: map tolerations: - description: 'Tolerations of a PostgreSQL pod. Changing this - value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a PostgreSQL pod. Changing this value causes PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). 
Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array topologySpreadConstraints: - description: 'Topology spread constraints of a PostgreSQL pod. - Changing this value causes PostgreSQL to restart. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/' + description: |- + Topology spread constraints of a PostgreSQL pod. Changing this value causes + PostgreSQL to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ items: description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine - the number of pods in their corresponding topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -9725,115 +10695,150 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which pods - may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the number - of matching pods in the target topology and the global - minimum. The global minimum is the minimum number of - matching pods in an eligible domain or zero if the number - of eligible domains is less than MinDomains. For example, - in a 3-zone cluster, MaxSkew is set to 1, and pods with - the same labelSelector spread as 2/2/1: In this case, - the global minimum is 1. | zone1 | zone2 | zone3 | | P - P | P P | P | - if MaxSkew is 1, incoming pod - can only be scheduled to zone3 to become 2/2/2; scheduling - it onto zone1(zone2) would make the ActualSkew(3-1) - on zone1(zone2) violate MaxSkew(1). - if MaxSkew is - 2, incoming pod can be scheduled onto any zone. When - `whenUnsatisfiable=ScheduleAnyway`, it is used to give - higher precedence to topologies that satisfy it. It''s - a required field. Default value is 1 and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number of - eligible domains. When the number of eligible domains - with matching topology keys is less than minDomains, - Pod Topology Spread treats \"global minimum\" as 0, - and then the calculation of Skew is performed. And when - the number of eligible domains with matching topology - keys equals or greater than minDomains, this value has - no effect on scheduling. 
As a result, when the number - of eligible domains is less than minDomains, scheduler - won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains - is equal to 1. Valid values are integers greater than - 0. When value is not nil, WhenUnsatisfiable must be - DoNotSchedule. \n For example, in a 3-zone cluster, - MaxSkew is set to 2, MinDomains is set to 5 and pods - with the same labelSelector spread as 2/2/2: | zone1 - | zone2 | zone3 | | P P | P P | P P | The number - of domains is less than 5(MinDomains), so \"global minimum\" - is treated as 0. In this situation, new pod with the - same labelSelector cannot be scheduled, because computed - skew will be 3(3 - 0) if new Pod is scheduled to any - of the three zones, it will violate MaxSkew. \n This - is an alpha field and requires enabling MinDomainsInPodTopologySpread - feature gate." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. format: int32 type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string topologyKey: - description: TopologyKey is the key of node labels. Nodes - that have a label with this key and identical values - are considered to be in the same topology. We consider - each as a "bucket", and try to put balanced - number of pods into each bucket. 
We define a domain - as a particular instance of a topology. Also, we define - an eligible domain as a domain whose nodes match the - node selector. e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if TopologyKey - is "topology.kubernetes.io/zone", each zone is a domain - of that topology. It's a required field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal - with a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not to - schedule it. - ScheduleAnyway tells the scheduler to - schedule the pod in any location, but giving higher - precedence to topologies that would help reduce the - skew. A constraint is considered "Unsatisfiable" for - an incoming pod if and only if every possible node assignment - for that pod would violate "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to - 1, and pods with the same labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming - pod can only be scheduled to zone2(zone3) to become - 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be - imbalanced, but scheduler won''t make it *more* imbalanced. - It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -9842,31 +10847,34 @@ spec: type: object type: array walVolumeClaimSpec: - description: 'Defines a separate PersistentVolumeClaim for PostgreSQL''s - write-ahead log. More info: https://www.postgresql.org/docs/current/wal.html' + description: |- + Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. + More info: https://www.postgresql.org/docs/current/wal.html properties: accessModes: - description: 'accessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string - minItems: 1 type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify either: + description: |- + dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the provisioner - or an external controller can support the specified data - source, it will create a new volume based on the contents - of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have the - same contents as the DataSourceRef field.' + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -9878,34 +10886,38 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from which - to populate the volume with data, if a non-empty volume - is desired. This may be any local object from a non-empty - API group (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume binding will - only succeed if the type of the specified object matches - some installed volume populator or dynamic provisioner. - This field will replace the functionality of the DataSource - field and as such if both fields are non-empty, they must - have the same value. For backwards compatibility, both - fields (DataSource and DataSourceRef) will be set to the - same value automatically if one of them is empty and the - other is non-empty. There are two important differences - between DataSource and DataSourceRef: * While DataSource - only allows two specific types of objects, DataSourceRef - allows any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed values - (dropping them), DataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. 
+ When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, the - specified Kind must be in the core API group. For - any other third-party types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource being referenced @@ -9913,17 +10925,23 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify resource - requirements that are lower than previous value but must - still be higher than capacity recorded in the status field - of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -9932,8 +10950,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of - compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -9942,16 +10961,12 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - required: - - storage + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object - required: - - requests type: object selector: description: selector is a label query over volumes to consider @@ -9961,8 +10976,8 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -9970,52 +10985,72 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists - or DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. 
+ description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the StorageClass - required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not - included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string - required: - - accessModes - - resources type: object + x-kubernetes-validations: + - message: missing accessModes + rule: has(self.accessModes) && size(self.accessModes) > 0 + - message: missing storage request + rule: has(self.resources) && has(self.resources.requests) + && has(self.resources.requests.storage) required: - dataVolumeClaimSpec type: object @@ -10047,34 +11082,122 @@ spec: exporter: properties: configuration: - description: 'Projected volumes containing custom PostgreSQL - Exporter configuration. Currently supports the customization - of PostgreSQL Exporter queries. If a "queries.yml" file - is detected in any volume projected using this field, - it will be loaded using the "extend.query-path" flag: + description: |- + Projected volumes containing custom PostgreSQL Exporter configuration. Currently supports + the customization of PostgreSQL Exporter queries. If a "queries.yml" file is detected in + any volume projected using this field, it will be loaded using the "extend.query-path" flag: https://github.com/prometheus-community/postgres_exporter#flags - Changing the values of field causes PostgreSQL and the - exporter to restart.' + Changing the values of field causes PostgreSQL and the exporter to restart. 
items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the ConfigMap, the - volume setup will error unless it is marked - optional. Paths must be relative and may not - contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -10083,41 +11206,43 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -10133,7 +11258,7 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, labels, - name and namespace are supported.' + name, namespace and uid are supported.' properties: apiVersion: description: Version of the schema @@ -10147,19 +11272,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used - to set permissions on this file, must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -10171,11 +11292,9 @@ spec: must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are - currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required @@ -10197,27 +11316,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the Secret, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. 
@@ -10226,69 +11344,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience - of the token. A recipient of a token must - identify itself with an identifier specified - in the audience of the token, and otherwise - should reject the token. The audience defaults - to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, - the kubelet volume plugin will proactively - rotate the service account token. The kubelet - will start trying to rotate the token if the - token is older than 80 percent of its time - to live or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the - mount point of the file to project the token - into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -10296,20 +11413,19 @@ spec: type: object type: array customTLSSecret: - description: Projected secret containing custom TLS certificates - to encrypt output from the exporter web server + description: |- + Projected secret containing custom TLS certificates to encrypt output from the exporter + web server properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file whose - name is the key and content is the value. If specified, - the listed keys will be projected into the specified - paths, and unlisted keys will not be present. If - a key is specified which is not present in the Secret, - the volume setup will error unless it is marked - optional. Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -10318,22 +11434,20 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used - to set permissions on this file. Must be an - octal value between 0000 and 0777 or a decimal - value between 0 and 511. YAML accepts both - octal and decimal values, JSON requires decimal - values for mode bits. If not specified, the - volume defaultMode will be used. This might - be in conflict with other options that affect - the file mode, like fsGroup, and the result - can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the - file to map the key to. May not be an absolute - path. May not contain the path element '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. May not start with the string '..'. type: string required: @@ -10341,23 +11455,58 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic image: - description: The image name to use for crunchy-postgres-exporter - containers. The image may also be set using the RELATED_IMAGE_PGEXPORTER - environment variable. + description: |- + The image name to use for crunchy-postgres-exporter containers. The image may + also be set using the RELATED_IMAGE_PGEXPORTER environment variable. type: string resources: - description: 'Changing this value causes PostgreSQL and - the exporter to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + description: |- + Changing this value causes PostgreSQL and the exporter to restart. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -10365,8 +11514,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -10375,33 +11525,36 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object type: object type: object openshift: - description: Whether or not the PostgreSQL cluster is being deployed - to an OpenShift environment. If the field is unset, the operator - will automatically detect the environment. + description: |- + Whether or not the PostgreSQL cluster is being deployed to an OpenShift + environment. If the field is unset, the operator will automatically + detect the environment. type: boolean patroni: properties: dynamicConfiguration: - description: 'Patroni dynamic configuration settings. Changes - to this value will be automatically reloaded without validation. - Changes to certain PostgreSQL parameters cause PostgreSQL to - restart. More info: https://patroni.readthedocs.io/en/latest/dynamic_configuration.html' + description: |- + Patroni dynamic configuration settings. Changes to this value will be + automatically reloaded without validation. Changes to certain PostgreSQL + parameters cause PostgreSQL to restart. + More info: https://patroni.readthedocs.io/en/latest/dynamic_configuration.html type: object x-kubernetes-preserve-unknown-fields: true leaderLeaseDurationSeconds: default: 30 - description: TTL of the cluster leader lock. "Think of it as the + description: |- + TTL of the cluster leader lock. "Think of it as the length of time before initiation of the automatic failover process." Changing this value causes PostgreSQL to restart. format: int32 @@ -10409,8 +11562,9 @@ spec: type: integer port: default: 8008 - description: The port on which Patroni should listen. Changing - this value causes PostgreSQL to restart. + description: |- + The port on which Patroni should listen. + Changing this value causes PostgreSQL to restart. format: int32 minimum: 1024 type: integer @@ -10423,20 +11577,19 @@ spec: in a PostgresCluster type: boolean targetInstance: - description: The instance that should become primary during - a switchover. This field is optional when Type is "Switchover" - and required when Type is "Failover". When it is not specified, - a healthy replica is automatically selected. + description: |- + The instance that should become primary during a switchover. This field is + optional when Type is "Switchover" and required when Type is "Failover". + When it is not specified, a healthy replica is automatically selected. type: string type: default: Switchover - description: 'Type of switchover to perform. Valid options - are Switchover and Failover. "Switchover" changes the primary - instance of a healthy PostgresCluster. "Failover" forces - a particular instance to be primary, regardless of other + description: |- + Type of switchover to perform. Valid options are Switchover and Failover. + "Switchover" changes the primary instance of a healthy PostgresCluster. + "Failover" forces a particular instance to be primary, regardless of other factors. A TargetInstance must be specified to failover. - NOTE: The Failover type is reserved as the "last resort" - case.' + NOTE: The Failover type is reserved as the "last resort" case. enum: - Switchover - Failover @@ -10446,7 +11599,8 @@ spec: type: object syncPeriodSeconds: default: 10 - description: The interval for refreshing the leader lock and applying + description: |- + The interval for refreshing the leader lock and applying dynamicConfiguration. Must be less than leaderLeaseDurationSeconds. Changing this value causes PostgreSQL to restart. 
format: int32 @@ -10454,8 +11608,9 @@ spec: type: integer type: object paused: - description: Suspends the rollout and reconciliation of changes made - to the PostgresCluster spec. + description: |- + Suspends the rollout and reconciliation of changes made to the + PostgresCluster spec. type: boolean port: default: 5432 @@ -10464,15 +11619,15 @@ spec: minimum: 1024 type: integer postGISVersion: - description: The PostGIS extension version installed in the PostgreSQL - image. When image is not set, indicates a PostGIS enabled image - will be used. + description: |- + The PostGIS extension version installed in the PostgreSQL image. + When image is not set, indicates a PostGIS enabled image will be used. type: string postgresVersion: description: The major version of PostgreSQL installed in the PostgreSQL image - maximum: 16 - minimum: 10 + maximum: 17 + minimum: 11 type: integer proxy: description: The specification of a proxy that connects to PostgreSQL. @@ -10481,31 +11636,30 @@ spec: description: Defines a PgBouncer proxy and connection pooler. properties: affinity: - description: 'Scheduling constraints of a PgBouncer pod. Changing - this value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of a PgBouncer pod. Changing this value causes + PgBouncer to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -10515,79 +11669,72 @@ spec: description: A list of node selector requirements by node's labels. 
items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -10599,105 +11746,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -10705,19 +11847,16 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -10728,18 +11867,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -10747,60 +11886,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -10808,70 +11969,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". 
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -10879,161 +12031,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. 
+ If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". 
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -11041,19 +12211,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -11064,18 +12231,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -11083,60 +12250,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -11144,70 +12333,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -11215,206 +12395,317 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object config: - description: 'Configuration settings for the PgBouncer process. - Changes to any of these values will be automatically reloaded - without validation. Be careful, as you may put PgBouncer - into an unusable state. More info: https://www.pgbouncer.org/usage.html#reload' + description: |- + Configuration settings for the PgBouncer process. Changes to any of these + values will be automatically reloaded without validation. Be careful, as + you may put PgBouncer into an unusable state. + More info: https://www.pgbouncer.org/usage.html#reload properties: databases: additionalProperties: type: string - description: 'PgBouncer database definitions. The key - is the database requested by a client while the value - is a libpq-styled connection string. The special key - "*" acts as a fallback. When this field is empty, PgBouncer - is configured with a single "*" entry that connects - to the primary PostgreSQL instance. More info: https://www.pgbouncer.org/config.html#section-databases' + description: |- + PgBouncer database definitions. The key is the database requested by a + client while the value is a libpq-styled connection string. The special + key "*" acts as a fallback. When this field is empty, PgBouncer is + configured with a single "*" entry that connects to the primary + PostgreSQL instance. + More info: https://www.pgbouncer.org/config.html#section-databases type: object files: - description: 'Files to mount under "/etc/pgbouncer". When - specified, settings in the "pgbouncer.ini" file are - loaded before all others. From there, other files may - be included by absolute path. Changing these references - causes PgBouncer to restart, but changes to the file - contents are automatically reloaded. More info: https://www.pgbouncer.org/config.html#include-directive' + description: |- + Files to mount under "/etc/pgbouncer". When specified, settings in the + "pgbouncer.ini" file are loaded before all others. From there, other + files may be included by absolute path. Changing these references causes + PgBouncer to restart, but changes to the file contents are automatically + reloaded. 
+ More info: https://www.pgbouncer.org/config.html#include-directive items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the ConfigMap, the - volume setup will error unless it is marked - optional. Paths must be relative and may not - contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -11423,41 +12714,43 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -11473,7 +12766,7 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, labels, - name and namespace are supported.' + name, namespace and uid are supported.' properties: apiVersion: description: Version of the schema @@ -11487,19 +12780,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used - to set permissions on this file, must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -11511,11 +12800,9 @@ spec: must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are - currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: description: 'Container name: required @@ -11537,27 +12824,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the Secret, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. 
@@ -11566,69 +12852,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience - of the token. A recipient of a token must - identify itself with an identifier specified - in the audience of the token, and otherwise - should reject the token. The audience defaults - to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, - the kubelet volume plugin will proactively - rotate the service account token. The kubelet - will start trying to rotate the token if the - token is older than 80 percent of its time - to live or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the - mount point of the file to project the token - into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -11638,56 +12923,58 @@ spec: global: additionalProperties: type: string - description: 'Settings that apply to the entire PgBouncer - process. More info: https://www.pgbouncer.org/config.html' + description: |- + Settings that apply to the entire PgBouncer process. + More info: https://www.pgbouncer.org/config.html type: object users: additionalProperties: type: string - description: 'Connection settings specific to particular - users. More info: https://www.pgbouncer.org/config.html#section-users' + description: |- + Connection settings specific to particular users. + More info: https://www.pgbouncer.org/config.html#section-users type: object type: object containers: - description: Custom sidecars for a PgBouncer pod. Changing - this value causes PgBouncer to restart. + description: |- + Custom sidecars for a PgBouncer pod. Changing this value causes + PgBouncer to restart. items: description: A single application container that you want to run within a pod. properties: args: - description: 'Arguments to the entrypoint. The container - image''s CMD is used if this is not provided. Variable - references $(VAR_NAME) are expanded using the container''s - environment. If a variable cannot be resolved, the - reference in the input string will be unchanged. Double - $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce - the string literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of whether the - variable exists or not. Cannot be updated. More info: - https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array + x-kubernetes-list-type: atomic command: - description: 'Entrypoint array. Not executed within - a shell. The container image''s ENTRYPOINT is used - if this is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. If - a variable cannot be resolved, the reference in the - input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) - syntax: i.e. "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists - or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell items: type: string type: array + x-kubernetes-list-type: atomic env: - description: List of environment variables to set in - the container. Cannot be updated. + description: |- + List of environment variables to set in the container. + Cannot be updated. items: description: EnvVar represents an environment variable present in a Container. @@ -11697,17 +12984,16 @@ spec: Must be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) - are expanded using the previously defined environment - variables in the container and any service environment - variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" - will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults - to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's @@ -11720,8 +13006,13 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the ConfigMap @@ -11730,12 +13021,11 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: - supports metadata.name, metadata.namespace, - `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, - status.hostIP, status.podIP, status.podIPs.' 
+ description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the @@ -11749,12 +13039,11 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: - only resources limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, - requests.cpu, requests.memory and requests.ephemeral-storage) - are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required @@ -11775,6 +13064,7 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic secretKeyRef: description: Selects a key of a secret in the pod's namespace @@ -11785,8 +13075,13 @@ spec: key. type: string name: - description: 'Name of the referent. More - info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret @@ -11795,20 +13090,23 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic type: object required: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: - description: List of sources to populate environment - variables in the container. The keys defined within - a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is - starting. When a key exists in multiple sources, the - value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will - take precedence. Cannot be updated. + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. items: description: EnvFromSource represents the source of a set of ConfigMaps @@ -11817,14 +13115,20 @@ spec: description: The ConfigMap to select from properties: name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the ConfigMap must be defined type: boolean type: object + x-kubernetes-map-type: atomic prefix: description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. @@ -11833,66 +13137,72 @@ spec: description: The Secret to select from properties: name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret must be defined type: boolean type: object + x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config - management to default or override container images - in workload controllers like Deployments and StatefulSets.' + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. type: string imagePullPolicy: - description: 'Image pull policy. One of Always, Never, - IfNotPresent. Defaults to Always if :latest tag is - specified, or IfNotPresent otherwise. Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images type: string lifecycle: - description: Actions that the management system should - take in response to container lifecycle events. Cannot - be updated. + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. properties: postStart: - description: 'PostStart is called immediately after - a container is created. If the handler fails, - the container is terminated and restarted according - to its restart policy. Other management of the - container blocks until the hook completes. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. 
The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -11903,7 +13213,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -11913,6 +13225,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -11921,24 +13234,36 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward - compatibility. There are no validation of - this field and lifecycle hooks will fail in - runtime when tcp handler is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to connect @@ -11948,55 +13273,49 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
x-kubernetes-int-or-string: true required: - port type: object type: object preStop: - description: 'PreStop is called immediately before - a container is terminated due to an API request - or management event such as liveness/startup probe - failure, preemption, resource contention, etc. - The handler is not called if the container crashes - or exits. The Pod''s termination grace period - countdown begins before the PreStop hook is executed. - Regardless of the outcome of the handler, the - container will eventually terminate within the - Pod''s termination grace period (unless delayed - by finalizers). Other management of the container - blocks until the hook completes or until the termination - grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line - to execute inside the container, the working - directory for the command is root ('/') - in the container's filesystem. The command - is simply exec'd, it is not run inside - a shell, so traditional shell instructions - ('|', etc) won't work. To use a shell, - you need to explicitly call out to that - shell. Exit status of 0 is treated as - live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http request to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. type: string httpHeaders: @@ -12007,7 +13326,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -12017,6 +13338,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -12025,24 +13347,36 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port - to access on the container. 
Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: - description: Deprecated. TCPSocket is NOT supported - as a LifecycleHandler and kept for the backward - compatibility. There are no validation of - this field and lifecycle hooks will fail in - runtime when tcp handler is specified. + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: host: description: 'Optional: Host name to connect @@ -12052,10 +13386,10 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port - to access on the container. Number must - be in the range 1 to 65535. Name must - be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port @@ -12063,37 +13397,36 @@ spec: type: object type: object livenessProbe: - description: 'Periodic probe of container liveness. - Container will be restarted if the probe fails. Cannot - be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') in - the container's filesystem. The command is - simply exec'd, it is not run inside a shell, - so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is - treated as live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
format: int32 type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -12101,11 +13434,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -12115,9 +13449,9 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -12127,7 +13461,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -12137,6 +13473,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -12144,34 +13481,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to - the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is - 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having - failed. Defaults to 1. Must be 1 for liveness - and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
format: int32 type: integer tcpSocket: @@ -12186,61 +13524,59 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value must - be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity - to shut down). This is a beta field and requires - enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the - probe times out. Defaults to 1 second. Minimum - value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object name: - description: Name of the container specified as a DNS_LABEL. + description: |- + Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. type: string ports: - description: List of ports to expose from the container. - Exposing a port here gives the system additional information - about the network connections a container uses, but - is primarily informational. Not specifying a port - here DOES NOT prevent that port from being exposed. - Any port which is listening on the default "0.0.0.0" - address inside a container will be accessible from - the network. Cannot be updated. + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. 
+ Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. items: description: ContainerPort represents a network port in a single container. properties: containerPort: - description: Number of port to expose on the pod's - IP address. This must be a valid port number, - 0 < x < 65536. + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer hostIP: @@ -12248,23 +13584,24 @@ spec: port to. type: string hostPort: - description: Number of port to expose on the host. - If specified, this must be a valid port number, - 0 < x < 65536. If HostNetwork is specified, - this must match ContainerPort. Most containers - do not need this. + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. format: int32 type: integer name: - description: If specified, this must be an IANA_SVC_NAME - and unique within the pod. Each named port in - a pod must have a unique name. Name for the - port that can be referred to by services. + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. type: string protocol: default: TCP - description: Protocol for port. Must be UDP, TCP, - or SCTP. Defaults to "TCP". + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". type: string required: - containerPort @@ -12275,37 +13612,36 @@ spec: - protocol x-kubernetes-list-type: map readinessProbe: - description: 'Periodic probe of container service readiness. - Container will be removed from service endpoints if - the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') in - the container's filesystem. The command is - simply exec'd, it is not run inside a shell, - so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is - treated as live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. 
+ description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -12313,11 +13649,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -12327,9 +13664,9 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -12339,7 +13676,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -12349,6 +13688,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -12356,34 +13696,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to - the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is - 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having - failed. Defaults to 1. Must be 1 for liveness - and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -12398,43 +13739,90 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value must - be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity - to shut down). This is a beta field and requires - enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the - probe times out. Defaults to 1 second. Minimum - value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: - description: 'Compute Resources required by this container. - Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Compute Resources required by this container. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -12442,8 +13830,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -12452,34 +13841,76 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. 
+ type: string securityContext: - description: 'SecurityContext defines the security options - the container should be run with. If set, the fields - of SecurityContext override the equivalent fields - of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation controls - whether a process can gain more privileges than - its parent process. This bool directly controls - if the no_new_privs flag will be set on the container - process. AllowPrivilegeEscalation is true always - when the container is: 1) run as Privileged 2) - has CAP_SYS_ADMIN Note that this field cannot - be set when spec.os.name is windows.' + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: - description: The capabilities to add/drop when running - containers. Defaults to the default set of capabilities - granted by the container runtime. Note that this - field cannot be set when spec.os.name is windows. + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. properties: add: description: Added capabilities @@ -12488,6 +13919,7 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -12495,68 +13927,63 @@ spec: type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: - description: Run container in privileged mode. Processes - in privileged containers are essentially equivalent - to root on the host. Defaults to false. Note that - this field cannot be set when spec.os.name is - windows. + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. 
type: boolean procMount: - description: procMount denotes the type of proc - mount to use for the containers. The default is - DefaultProcMount which uses the container runtime - defaults for readonly paths and masked paths. - This requires the ProcMountType feature flag to - be enabled. Note that this field cannot be set - when spec.os.name is windows. + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: Whether this container has a read-only - root filesystem. Default is false. Note that this - field cannot be set when spec.os.name is windows. + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. type: boolean runAsGroup: - description: The GID to run the entrypoint of the - container process. Uses runtime default if unset. - May also be set in PodSecurityContext. If set - in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: Indicates that the container must run - as a non-root user. If true, the Kubelet will - validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start - the container if it does. If unset or false, no - such validation will be performed. May also be - set in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in - SecurityContext takes precedence. + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: The UID to run the entrypoint of the - container process. Defaults to user specified - in image metadata if unspecified. May also be - set in PodSecurityContext. If set in both SecurityContext - and PodSecurityContext, the value specified in - SecurityContext takes precedence. Note that this - field cannot be set when spec.os.name is windows. + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: The SELinux context to be applied to - the container. 
If unspecified, the container runtime - will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is windows. + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that @@ -12576,114 +14003,98 @@ spec: type: string type: object seccompProfile: - description: The seccomp options to use by this - container. If seccomp options are provided at - both the pod & container level, the container - options override the pod options. Note that this - field cannot be set when spec.os.name is windows. + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: localhostProfile indicates a profile - defined in a file on the node should be used. - The profile must be preconfigured on the node - to work. Must be a descending path, relative - to the kubelet's configured seccomp profile - location. Must only be set if type is "Localhost". + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string type: - description: 'type indicates which kind of seccomp - profile will be applied. Valid options are: - Localhost - a profile defined in a file on - the node should be used. RuntimeDefault - - the container runtime default profile should - be used. Unconfined - no profile should be - applied.' + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string required: - type type: object windowsOptions: - description: The Windows specific settings applied - to all containers. If unspecified, the options - from the PodSecurityContext will be used. If set - in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name - is linux. + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. 
properties: gmsaCredentialSpec: - description: GMSACredentialSpec is where the - GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA credential - spec named by the GMSACredentialSpecName field. + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: description: GMSACredentialSpecName is the name of the GMSA credential spec to use. type: string hostProcess: - description: HostProcess determines if a container - should be run as a 'Host Process' container. - This field is alpha-level and will only be - honored by components that enable the WindowsHostProcessContainers - feature flag. Setting this field without the - feature flag will result in errors when validating - the Pod. All of a Pod's containers must have - the same effective HostProcess value (it is - not allowed to have a mix of HostProcess containers - and non-HostProcess containers). In addition, - if HostProcess is true then HostNetwork must - also be set to true. + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: The UserName in Windows to run - the entrypoint of the container process. Defaults - to the user specified in image metadata if - unspecified. May also be set in PodSecurityContext. - If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes - precedence. + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string type: object type: object startupProbe: - description: 'StartupProbe indicates that the Pod has - successfully initialized. If specified, no other probes - are executed until this completes successfully. If - this probe fails, the Pod will be restarted, just - as if the livenessProbe failed. This can be used to - provide different probe parameters at the beginning - of a Pod''s lifecycle, when it might take a long time - to load data or warm a cache, than during steady-state - operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: description: Exec specifies the action to take. 
properties: command: - description: Command is the command line to - execute inside the container, the working - directory for the command is root ('/') in - the container's filesystem. The command is - simply exec'd, it is not run inside a shell, - so traditional shell instructions ('|', etc) - won't work. To use a shell, you need to explicitly - call out to that shell. Exit status of 0 is - treated as live/healthy and non-zero is unhealthy. + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: Minimum consecutive failures for the - probe to be considered failed after having succeeded. + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: description: GRPC specifies an action involving - a GRPC port. This is a beta field and requires - enabling GRPCContainerProbe feature gate. + a GRPC port. properties: port: description: Port number of the gRPC service. @@ -12691,11 +14102,12 @@ spec: format: int32 type: integer service: - description: "Service is the name of the service - to place in the gRPC HealthCheckRequest (see - https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - \n If this is not specified, the default behavior - is defined by gRPC." + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. type: string required: - port @@ -12705,9 +14117,9 @@ spec: to perform. properties: host: - description: Host name to connect to, defaults - to the pod IP. You probably want to set "Host" - in httpHeaders instead. + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: description: Custom headers to set in the request. @@ -12717,7 +14129,9 @@ spec: header to be used in HTTP probes properties: name: - description: The header field name + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: description: The header field value @@ -12727,6 +14141,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. type: string @@ -12734,34 +14149,35 @@ spec: anyOf: - type: integer - type: string - description: Name or number of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: Scheme to use for connecting to - the host. Defaults to HTTP. + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
type: string required: - port type: object initialDelaySeconds: - description: 'Number of seconds after the container - has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: How often (in seconds) to perform the - probe. Default to 10 seconds. Minimum value is - 1. + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: Minimum consecutive successes for the - probe to be considered successful after having - failed. Defaults to 1. Must be 1 for liveness - and startup. Minimum value is 1. + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: @@ -12776,83 +14192,75 @@ spec: anyOf: - type: integer - type: string - description: Number or name of the port to access - on the container. Number must be in the range - 1 to 65535. Name must be an IANA_SVC_NAME. + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: Optional duration in seconds the pod - needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after - the processes running in the pod are sent a termination - signal and the time when the processes are forcibly - halted with a kill signal. Set this value longer - than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value overrides - the value provided by the pod spec. Value must - be non-negative integer. The value zero indicates - stop immediately via the kill signal (no opportunity - to shut down). This is a beta field and requires - enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: 'Number of seconds after which the - probe times out. Defaults to 1 second. Minimum - value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object stdin: - description: Whether this container should allocate - a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will - always result in EOF. Default is false. + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. type: boolean stdinOnce: - description: Whether the container runtime should close - the stdin channel after it has been opened by a single - attach. When stdin is true the stdin stream will remain - open across multiple attach sessions. If stdinOnce - is set to true, stdin is opened on container start, - is empty until the first client attaches to stdin, - and then remains open and accepts data until the client - disconnects, at which time stdin is closed and remains - closed until the container is restarted. If this flag - is false, a container processes that reads from stdin - will never receive an EOF. Default is false + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false type: boolean terminationMessagePath: - description: 'Optional: Path at which the file to which - the container''s termination message will be written - is mounted into the container''s filesystem. Message - written is intended to be brief final status, such - as an assertion failure message. Will be truncated - by the node if greater than 4096 bytes. The total - message length across all containers will be limited - to 12kb. Defaults to /dev/termination-log. Cannot - be updated.' + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. type: string terminationMessagePolicy: - description: Indicate how the termination message should - be populated. File will use the contents of terminationMessagePath - to populate the container status message on both success - and failure. FallbackToLogsOnError will use the last - chunk of container log output if the termination message - file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, - whichever is smaller. Defaults to File. Cannot be - updated. + description: |- + Indicate how the termination message should be populated. 
File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. type: string tty: - description: Whether this container should allocate - a TTY for itself, also requires 'stdin' to be true. + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. type: boolean volumeDevices: @@ -12876,78 +14284,106 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: - description: Pod volumes to mount into the container's - filesystem. Cannot be updated. + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. items: description: VolumeMount describes a mounting of a Volume within a container. properties: mountPath: - description: Path within the container at which - the volume should be mounted. Must not contain - ':'. + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. type: string mountPropagation: - description: mountPropagation determines how mounts - are propagated from the host to container and - the other way around. When not set, MountPropagationNone - is used. This field is beta in 1.10. + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name of a Volume. type: string readOnly: - description: Mounted read-only if true, read-write - otherwise (false or unspecified). Defaults to - false. + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: - description: Path within the volume from which - the container's volume should be mounted. Defaults - to "" (volume's root). + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). type: string subPathExpr: - description: Expanded path within the volume from - which the container's volume should be mounted. 
- Behaves similarly to SubPath but environment - variable references $(VAR_NAME) are expanded - using the container's environment. Defaults - to "" (volume's root). SubPathExpr and SubPath - are mutually exclusive. + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. type: string required: - mountPath - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: - description: Container's working directory. If not specified, - the container runtime's default will be used, which - might be configured in the container image. Cannot - be updated. + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. type: string required: - name type: object type: array customTLSSecret: - description: 'A secret projection containing a certificate - and key with which to encrypt connections to PgBouncer. - The "tls.crt", "tls.key", and "ca.crt" paths must be PEM-encoded - certificates and keys. Changing this value causes PgBouncer - to restart. More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths' + description: |- + A secret projection containing a certificate and key with which to encrypt + connections to PgBouncer. The "tls.crt", "tls.key", and "ca.crt" paths must + be PEM-encoded certificates and keys. Changing this value causes PgBouncer + to restart. + More info: https://kubernetes.io/docs/concepts/configuration/secret/#projection-of-secret-keys-to-specific-paths properties: items: - description: items if unspecified, each key-value pair - in the Data field of the referenced Secret will be projected - into the volume as a file whose name is the key and - content is the value. If specified, the listed keys - will be projected into the specified paths, and unlisted - keys will not be present. If a key is specified which - is not present in the Secret, the volume setup will - error unless it is marked optional. Paths must be relative - and may not contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. properties: @@ -12955,41 +14391,49 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits used to - set permissions on this file. Must be an octal - value between 0000 and 0777 or a decimal value - between 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal values for - mode bits. If not specified, the volume defaultMode - will be used. 
This might be in conflict with other - options that affect the file mode, like fsGroup, - and the result can be other mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path of the file - to map the key to. May not be an absolute path. - May not contain the path element '..'. May not - start with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic image: - description: 'Name of a container image that can run PgBouncer - 1.15 or newer. Changing this value causes PgBouncer to restart. - The image may also be set using the RELATED_IMAGE_PGBOUNCER - environment variable. More info: https://kubernetes.io/docs/concepts/containers/images' + description: |- + Name of a container image that can run PgBouncer 1.15 or newer. Changing + this value causes PgBouncer to restart. The image may also be set using + the RELATED_IMAGE_PGBOUNCER environment variable. + More info: https://kubernetes.io/docs/concepts/containers/images type: string metadata: description: Metadata contains metadata for custom resources @@ -13007,20 +14451,23 @@ spec: anyOf: - type: integer - type: string - description: Minimum number of pods that should be available - at a time. Defaults to one when the replicas field is greater - than one. + description: |- + Minimum number of pods that should be available at a time. + Defaults to one when the replicas field is greater than one. x-kubernetes-int-or-string: true port: default: 5432 - description: Port on which PgBouncer should listen for client - connections. Changing this value causes PgBouncer to restart. + description: |- + Port on which PgBouncer should listen for client connections. Changing + this value causes PgBouncer to restart. format: int32 minimum: 1024 type: integer priorityClassName: - description: 'Priority class name for the pgBouncer pod. Changing - this value causes PostgreSQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgBouncer pod. Changing this value causes + PostgreSQL to restart. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string replicas: default: 1 @@ -13029,10 +14476,36 @@ spec: minimum: 0 type: integer resources: - description: 'Compute resources of a PgBouncer container. - Changing this value causes PgBouncer to restart. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + description: |- + Compute resources of a PgBouncer container. Changing this value causes + PgBouncer to restart. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -13040,8 +14513,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -13050,11 +14524,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object service: @@ -13085,11 +14559,11 @@ spec: type: object type: object nodePort: - description: The port on which this service is exposed - when type is NodePort or LoadBalancer. Value must be - in-range and not in use or the operation will fail. - If unspecified, a port will be allocated if this Service - requires one. - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. 
+ - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport format: int32 type: integer type: @@ -13111,6 +14585,32 @@ spec: resources: description: Resource requirements for a sidecar container properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -13118,8 +14618,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -13128,198 +14629,229 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is - omitted for a container, it defaults to Limits - if that is explicitly specified, otherwise to - an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object type: object tolerations: - description: 'Tolerations of a PgBouncer pod. Changing this - value causes PgBouncer to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a PgBouncer pod. Changing this value causes PgBouncer to + restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. 
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array topologySpreadConstraints: - description: 'Topology spread constraints of a PgBouncer pod. - Changing this value causes PgBouncer to restart. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/' + description: |- + Topology spread constraints of a PgBouncer pod. Changing this value causes + PgBouncer to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ items: description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. properties: labelSelector: - description: LabelSelector is used to find matching - pods. Pods that match this label selector are counted - to determine the number of pods in their corresponding - topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which - pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the - number of matching pods in the target topology and - the global minimum. The global minimum is the minimum - number of matching pods in an eligible domain or zero - if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to - 1, and pods with the same labelSelector spread as - 2/2/1: In this case, the global minimum is 1. | zone1 - | zone2 | zone3 | | P P | P P | P | - if MaxSkew - is 1, incoming pod can only be scheduled to zone3 - to become 2/2/2; scheduling it onto zone1(zone2) would - make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto - any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies - that satisfy it. 
It''s a required field. Default value - is 1 and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number - of eligible domains. When the number of eligible domains - with matching topology keys is less than minDomains, - Pod Topology Spread treats \"global minimum\" as 0, - and then the calculation of Skew is performed. And - when the number of eligible domains with matching - topology keys equals or greater than minDomains, this - value has no effect on scheduling. As a result, when - the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to - those domains. If value is nil, the constraint behaves - as if MinDomains is equal to 1. Valid values are integers - greater than 0. When value is not nil, WhenUnsatisfiable - must be DoNotSchedule. \n For example, in a 3-zone - cluster, MaxSkew is set to 2, MinDomains is set to - 5 and pods with the same labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | | P P | P P | P P | - The number of domains is less than 5(MinDomains), - so \"global minimum\" is treated as 0. In this situation, - new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod - is scheduled to any of the three zones, it will violate - MaxSkew. \n This is an alpha field and requires enabling - MinDomainsInPodTopologySpread feature gate." + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. 
+ In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. format: int32 type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string topologyKey: - description: TopologyKey is the key of node labels. - Nodes that have a label with this key and identical - values are considered to be in the same topology. - We consider each as a "bucket", and try - to put balanced number of pods into each bucket. We - define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose - nodes match the node selector. e.g. If TopologyKey - is "kubernetes.io/hostname", each Node is a domain - of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", - each zone is a domain of that topology. It's a required - field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal - with a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not - to schedule it. - ScheduleAnyway tells the scheduler - to schedule the pod in any location, but giving higher - precedence to topologies that would help reduce the - skew. A constraint is considered "Unsatisfiable" for - an incoming pod if and only if every possible node - assignment for that pod would violate "MaxSkew" on - some topology. For example, in a 3-zone cluster, MaxSkew - is set to 1, and pods with the same labelSelector - spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P - | P | P | If WhenUnsatisfiable is set to DoNotSchedule, - incoming pod can only be scheduled to zone2(zone3) - to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) - satisfies MaxSkew(1). 
In other words, the cluster - can still be imbalanced, but scheduler won''t make - it *more* imbalanced. It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -13360,10 +14892,11 @@ spec: type: object type: object nodePort: - description: The port on which this service is exposed when type - is NodePort or LoadBalancer. Value must be in-range and not - in use or the operation will fail. If unspecified, a port will - be allocated if this Service requires one. - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport format: int32 type: integer type: @@ -13404,10 +14937,11 @@ spec: type: object type: object nodePort: - description: The port on which this service is exposed when type - is NodePort or LoadBalancer. Value must be in-range and not - in use or the operation will fail. If unspecified, a port will - be allocated if this Service requires one. - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport format: int32 type: integer type: @@ -13420,10 +14954,11 @@ spec: type: string type: object shutdown: - description: Whether or not the PostgreSQL cluster should be stopped. - When this is true, workloads are scaled to zero and CronJobs are - suspended. Other resources, such as Services and Volumes, remain - in place. + description: |- + Whether or not the PostgreSQL cluster should be stopped. + When this is true, workloads are scaled to zero and CronJobs + are suspended. + Other resources, such as Services and Volumes, remain in place. type: boolean standby: description: Run this cluster as a read-only copy of an existing cluster @@ -13431,9 +14966,10 @@ spec: properties: enabled: default: true - description: Whether or not the PostgreSQL cluster should be read-only. - When this is true, WAL files are applied from a pgBackRest repository - or another PostgreSQL server. 
+ description: |- + Whether or not the PostgreSQL cluster should be read-only. When this is + true, WAL files are applied from a pgBackRest repository or another + PostgreSQL server. type: boolean host: description: Network address of the PostgreSQL server to follow @@ -13452,9 +14988,10 @@ spec: type: string type: object supplementalGroups: - description: 'A list of group IDs applied to the process of a container. - These can be useful when accessing shared file systems with constrained - permissions. More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context' + description: |- + A list of group IDs applied to the process of a container. These can be + useful when accessing shared file systems with constrained permissions. + More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context items: format: int64 maximum: 2147483647 @@ -13469,31 +15006,30 @@ spec: description: Defines a pgAdmin user interface. properties: affinity: - description: 'Scheduling constraints of a pgAdmin pod. Changing - this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' + description: |- + Scheduling constraints of a pgAdmin pod. Changing this value causes + pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node properties: nodeAffinity: description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node matches the corresponding matchExpressions; - the node(s) with the highest sum are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: - description: An empty preferred scheduling term - matches all objects with implicit weight 0 (i.e. - it's a no-op). A null preferred scheduling term - matches no objects (i.e. is also a no-op). + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: description: A node selector term, associated @@ -13503,79 +15039,72 @@ spec: description: A list of node selector requirements by node's labels. 
items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic weight: description: Weight associated with matching the corresponding nodeSelectorTerm, in the @@ -13587,105 +15116,100 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to an update), the system may or may not try - to eventually evict the pod from its node. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: description: Required. A list of node selector terms. The terms are ORed. items: - description: A null or empty node selector term - matches no objects. The requirements of them - are ANDed. The TopologySelectorTerm type implements - a subset of the NodeSelectorTerm. + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: description: A list of node selector requirements by node's labels. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. items: - description: A node selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. properties: key: description: The label key that the selector applies to. type: string operator: - description: Represents a key's relationship - to a set of values. Valid operators - are In, NotIn, Exists, DoesNotExist. - Gt, and Lt. + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: An array of string values. - If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. - If the operator is Gt or Lt, the - values array must have a single - element, which will be interpreted - as an integer. This array is replaced - during a strategic merge patch. + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object + x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object + x-kubernetes-map-type: atomic type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. @@ -13693,19 +15217,16 @@ spec: other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements - of this field and adding "weight" to the sum if - the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -13716,18 +15237,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -13735,60 +15256,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -13796,70 +15339,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". 
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -13867,161 +15401,179 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - affinity requirements specified by this field cease - to be met at some point during pod execution (e.g. - due to a pod label update), the system may or may - not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, - i.e. all terms must be satisfied. + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. 
+ If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". 
+ description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules @@ -14029,19 +15581,16 @@ spec: etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule - pods to nodes that satisfy the anti-affinity expressions - specified by this field, but it may choose a node - that violates one or more of the expressions. The - node that is most preferred is the one with the - greatest sum of weights, i.e. for each node that - meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity - expressions, etc.), compute a sum by iterating through - the elements of this field and adding "weight" to - the sum if the node has pods which matches the corresponding - podAffinityTerm; the node(s) with the highest sum - are the most preferred. + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred @@ -14052,18 +15601,18 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of - resources, in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. 
+ description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -14071,60 +15620,82 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set - of namespaces that the term applies to. - The term is applied to the union of the - namespaces selected by this field and - the ones listed in the namespaces field. - null selector and null or empty namespaces - list means "this pod's namespace". An - empty selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label @@ -14132,70 +15703,61 @@ spec: to. type: string operator: - description: operator represents - a key's relationship to a set - of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array - of string values. If the operator - is In or NotIn, the values array - must be non-empty. If the operator - is Exists or DoesNotExist, the - values array must be empty. - This array is replaced during - a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of - {key,value} pairs. A single {key,value} - in the matchLabels map is equivalent - to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are - ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static - list of namespace names that the term - applies to. The term is applied to the - union of the namespaces listed in this - field and the ones selected by namespaceSelector. - null or empty namespaces list and null - namespaceSelector means "this pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located - (affinity) or not co-located (anti-affinity) - with the pods matching the labelSelector - in the specified namespaces, where co-located - is defined as running on a node whose - value of the label with key topologyKey - matches that of any node on which any - of the selected pods is running. Empty - topologyKey is not allowed. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: weight associated with matching - the corresponding podAffinityTerm, in the - range 1-100. + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer required: @@ -14203,192 +15765,301 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified - by this field are not met at scheduling time, the - pod will not be scheduled onto the node. If the - anti-affinity requirements specified by this field - cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may - or may not try to eventually evict the pod from - its node. When there are multiple elements, the - lists of nodes corresponding to each podAffinityTerm - are intersected, i.e. all terms must be satisfied. + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: Defines a set of pods (namely those - matching the labelSelector relative to the given - namespace(s)) that this pod should be co-located - (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node - whose value of the label with key - matches that of any node on which a pod of the - set of pods is running + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, - in this case pods. + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: A label query over the set of namespaces - that the term applies to. The term is applied - to the union of the namespaces selected by - this field and the ones listed in the namespaces - field. null selector and null or empty namespaces - list means "this pod's namespace". An empty - selector ({}) matches all namespaces. + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement - is a selector that contains values, - a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a - key's relationship to a set of values. - Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of - string values. If the operator is - In or NotIn, the values array must - be non-empty. If the operator is - Exists or DoesNotExist, the values - array must be empty. This array - is replaced during a strategic merge - patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator - is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic namespaces: - description: namespaces specifies a static list - of namespace names that the term applies to. - The term is applied to the union of the namespaces - listed in this field and the ones selected - by namespaceSelector. null or empty namespaces - list and null namespaceSelector means "this - pod's namespace". + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the - pods matching the labelSelector in the specified - namespaces, where co-located is defined as - running on a node whose value of the label - with key topologyKey matches that of any node - on which any of the selected pods is running. + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object config: - description: Configuration settings for the pgAdmin process. - Changes to any of these values will be loaded without validation. - Be careful, as you may put pgAdmin into an unusable state. + description: |- + Configuration settings for the pgAdmin process. Changes to any of these + values will be loaded without validation. Be careful, as + you may put pgAdmin into an unusable state. properties: files: - description: Files allows the user to mount projected - volumes into the pgAdmin container so that files can - be referenced by pgAdmin as needed. + description: |- + Files allows the user to mount projected volumes into the pgAdmin + container so that files can be referenced by pgAdmin as needed. items: description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object configMap: description: configMap information about the configMap data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced ConfigMap - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the ConfigMap, the - volume setup will error unless it is marked - optional. Paths must be relative and may not - contain the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -14397,41 +16068,43 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object + x-kubernetes-map-type: atomic downwardAPI: description: downwardAPI information about the downwardAPI data to project @@ -14447,7 +16120,7 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, labels, - name and namespace are supported.' + name, namespace and uid are supported.' properties: apiVersion: description: Version of the schema @@ -14461,19 +16134,15 @@ spec: required: - fieldPath type: object + x-kubernetes-map-type: atomic mode: - description: 'Optional: mode bits used - to set permissions on this file, must - be an octal value between 0000 and 0777 - or a decimal value between 0 and 511. - YAML accepts both octal and decimal - values, JSON requires decimal values - for mode bits. If not specified, the - volume defaultMode will be used. This - might be in conflict with other options - that affect the file mode, like fsGroup, - and the result can be other mode bits - set.' + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: @@ -14485,11 +16154,9 @@ spec: must not start with ''..''' type: string resourceFieldRef: - description: 'Selects a resource of the - container: only resources limits and - requests (limits.cpu, limits.memory, - requests.cpu and requests.memory) are - currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
properties: containerName: description: 'Container name: required @@ -14511,27 +16178,26 @@ spec: required: - resource type: object + x-kubernetes-map-type: atomic required: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data to project properties: items: - description: items if unspecified, each key-value - pair in the Data field of the referenced Secret - will be projected into the volume as a file - whose name is the key and content is the value. - If specified, the listed keys will be projected - into the specified paths, and unlisted keys - will not be present. If a key is specified - which is not present in the Secret, the volume - setup will error unless it is marked optional. - Paths must be relative and may not contain - the '..' path or start with '..'. + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. items: description: Maps a string key to a path within a volume. @@ -14540,69 +16206,68 @@ spec: description: key is the key to project. type: string mode: - description: 'mode is Optional: mode bits - used to set permissions on this file. - Must be an octal value between 0000 - and 0777 or a decimal value between - 0 and 511. YAML accepts both octal and - decimal values, JSON requires decimal - values for mode bits. If not specified, - the volume defaultMode will be used. - This might be in conflict with other - options that affect the file mode, like - fsGroup, and the result can be other - mode bits set.' + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: path is the relative path - of the file to map the key to. May not - be an absolute path. May not contain - the path element '..'. May not start - with the string '..'. + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: 'Name of the referent. More info: - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: optional field specify whether the Secret or its key must be defined type: boolean type: object + x-kubernetes-map-type: atomic serviceAccountToken: description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: audience is the intended audience - of the token. A recipient of a token must - identify itself with an identifier specified - in the audience of the token, and otherwise - should reject the token. The audience defaults - to the identifier of the apiserver. + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. type: string expirationSeconds: - description: expirationSeconds is the requested - duration of validity of the service account - token. As the token approaches expiration, - the kubelet volume plugin will proactively - rotate the service account token. The kubelet - will start trying to rotate the token if the - token is older than 80 percent of its time - to live or if the token is older than 24 hours.Defaults - to 1 hour and must be at least 10 minutes. + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. format: int64 type: integer path: - description: path is the path relative to the - mount point of the file to project the token - into. + description: |- + path is the path relative to the mount point of the file to project the + token into. type: string required: - path @@ -14610,15 +16275,22 @@ spec: type: object type: array ldapBindPassword: - description: 'A Secret containing the value for the LDAP_BIND_PASSWORD - setting. More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html' + description: |- + A Secret containing the value for the LDAP_BIND_PASSWORD setting. + More info: https://www.pgadmin.org/docs/pgadmin4/latest/ldap.html properties: key: description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string optional: description: Specify whether the Secret or its key @@ -14627,37 +16299,43 @@ spec: required: - key type: object + x-kubernetes-map-type: atomic settings: - description: 'Settings for the pgAdmin server process. - Keys should be uppercase and values must be constants. - More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html' + description: |- + Settings for the pgAdmin server process. Keys should be uppercase and + values must be constants. 
+ More info: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html type: object x-kubernetes-preserve-unknown-fields: true type: object dataVolumeClaimSpec: - description: 'Defines a PersistentVolumeClaim for pgAdmin - data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + description: |- + Defines a PersistentVolumeClaim for pgAdmin data. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes properties: accessModes: - description: 'accessModes contains the desired access - modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array + x-kubernetes-list-type: atomic dataSource: - description: 'dataSource field can be used to specify - either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) If the provisioner - or an external controller can support the specified - data source, it will create a new volume based on the - contents of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have - the same contents as the DataSourceRef field.' + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. type: string kind: @@ -14670,33 +16348,37 @@ spec: - kind - name type: object + x-kubernetes-map-type: atomic dataSourceRef: - description: 'dataSourceRef specifies the object from - which to populate the volume with data, if a non-empty - volume is desired. This may be any local object from - a non-empty API group (non core object) or a PersistentVolumeClaim - object. When this field is specified, volume binding - will only succeed if the type of the specified object - matches some installed volume populator or dynamic provisioner. - This field will replace the functionality of the DataSource - field and as such if both fields are non-empty, they - must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will be set - to the same value automatically if one of them is empty - and the other is non-empty. There are two important - differences between DataSource and DataSourceRef: * - While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well as - PersistentVolumeClaim objects. 
* While DataSource ignores - disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value - is specified. (Beta) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. type: string kind: @@ -14705,17 +16387,23 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name type: object resources: - description: 'resources represents the minimum resources - the volume should have. If RecoverVolumeExpansionFailure - feature is enabled users are allowed to specify resource - requirements that are lower than previous value but - must still be higher than capacity recorded in the status - field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: limits: additionalProperties: @@ -14724,8 +16412,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount - of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -14734,11 +16423,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount - of compute resources required. If Requests is omitted - for a container, it defaults to Limits if that is - explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: @@ -14749,8 +16438,8 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. properties: key: @@ -14758,43 +16447,60 @@ spec: applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, - NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values - array must be non-empty. If the operator is - Exists or DoesNotExist, the values array must - be empty. This array is replaced during a - strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field - is "key", the operator is "In", and the values array - contains only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic storageClassName: - description: 'storageClassName is the name of the StorageClass - required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: - description: volumeMode defines what type of volume is - required by the claim. Value of Filesystem is implied - when not included in claim spec. + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. type: string volumeName: description: volumeName is the binding reference to the @@ -14802,10 +16508,11 @@ spec: type: string type: object image: - description: 'Name of a container image that can run pgAdmin - 4. Changing this value causes pgAdmin to restart. The image - may also be set using the RELATED_IMAGE_PGADMIN environment - variable. More info: https://kubernetes.io/docs/concepts/containers/images' + description: |- + Name of a container image that can run pgAdmin 4. Changing this value causes + pgAdmin to restart. The image may also be set using the RELATED_IMAGE_PGADMIN + environment variable. + More info: https://kubernetes.io/docs/concepts/containers/images type: string metadata: description: Metadata contains metadata for custom resources @@ -14820,8 +16527,10 @@ spec: type: object type: object priorityClassName: - description: 'Priority class name for the pgAdmin pod. Changing - this value causes pgAdmin to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/' + description: |- + Priority class name for the pgAdmin pod. Changing this value causes pgAdmin + to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ type: string replicas: default: 1 @@ -14831,9 +16540,36 @@ spec: minimum: 0 type: integer resources: - description: 'Compute resources of a pgAdmin container. Changing - this value causes pgAdmin to restart. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + description: |- + Compute resources of a pgAdmin container. Changing this value causes + pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -14841,8 +16577,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -14851,11 +16588,11 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object service: @@ -14886,11 +16623,11 @@ spec: type: object type: object nodePort: - description: The port on which this service is exposed - when type is NodePort or LoadBalancer. Value must be - in-range and not in use or the operation will fail. - If unspecified, a port will be allocated if this Service - requires one. - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + description: |- + The port on which this service is exposed when type is NodePort or + LoadBalancer. Value must be in-range and not in use or the operation will + fail. If unspecified, a port will be allocated if this Service requires one. + - https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport format: int32 type: integer type: @@ -14903,187 +16640,218 @@ spec: type: string type: object tolerations: - description: 'Tolerations of a pgAdmin pod. Changing this - value causes pgAdmin to restart. 
More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration' + description: |- + Tolerations of a pgAdmin pod. Changing this value causes pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . properties: effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, - allowed values are NoSchedule, PreferNoSchedule and - NoExecute. + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: Key is the taint key that the toleration - applies to. Empty means match all taint keys. If the - key is empty, operator must be Exists; this combination - means to match all values and all keys. + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: Operator represents a key's relationship - to the value. Valid operators are Exists and Equal. - Defaults to Equal. Exists is equivalent to wildcard - for value, so that a pod can tolerate all taints of - a particular category. + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. type: string tolerationSeconds: - description: TolerationSeconds represents the period - of time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the - taint forever (do not evict). Zero and negative values - will be treated as 0 (evict immediately) by the system. + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: Value is the taint value the toleration - matches to. If the operator is Exists, the value should - be empty, otherwise just a regular string. + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array topologySpreadConstraints: - description: 'Topology spread constraints of a pgAdmin pod. - Changing this value causes pgAdmin to restart. More info: - https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/' + description: |- + Topology spread constraints of a pgAdmin pod. Changing this value causes + pgAdmin to restart. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ items: description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. 
properties: labelSelector: - description: LabelSelector is used to find matching - pods. Pods that match this label selector are counted - to determine the number of pods in their corresponding - topology domain. + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic maxSkew: - description: 'MaxSkew describes the degree to which - pods may be unevenly distributed. 
When `whenUnsatisfiable=DoNotSchedule`, - it is the maximum permitted difference between the - number of matching pods in the target topology and - the global minimum. The global minimum is the minimum - number of matching pods in an eligible domain or zero - if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to - 1, and pods with the same labelSelector spread as - 2/2/1: In this case, the global minimum is 1. | zone1 - | zone2 | zone3 | | P P | P P | P | - if MaxSkew - is 1, incoming pod can only be scheduled to zone3 - to become 2/2/2; scheduling it onto zone1(zone2) would - make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto - any zone. When `whenUnsatisfiable=ScheduleAnyway`, - it is used to give higher precedence to topologies - that satisfy it. It''s a required field. Default value - is 1 and 0 is not allowed.' + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. format: int32 type: integer minDomains: - description: "MinDomains indicates a minimum number - of eligible domains. When the number of eligible domains - with matching topology keys is less than minDomains, - Pod Topology Spread treats \"global minimum\" as 0, - and then the calculation of Skew is performed. And - when the number of eligible domains with matching - topology keys equals or greater than minDomains, this - value has no effect on scheduling. As a result, when - the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to - those domains. If value is nil, the constraint behaves - as if MinDomains is equal to 1. Valid values are integers - greater than 0. When value is not nil, WhenUnsatisfiable - must be DoNotSchedule. \n For example, in a 3-zone - cluster, MaxSkew is set to 2, MinDomains is set to - 5 and pods with the same labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | | P P | P P | P P | - The number of domains is less than 5(MinDomains), - so \"global minimum\" is treated as 0. In this situation, - new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod - is scheduled to any of the three zones, it will violate - MaxSkew. \n This is an alpha field and requires enabling - MinDomainsInPodTopologySpread feature gate." + description: |- + MinDomains indicates a minimum number of eligible domains. 
+ When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. format: int32 type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string topologyKey: - description: TopologyKey is the key of node labels. - Nodes that have a label with this key and identical - values are considered to be in the same topology. - We consider each as a "bucket", and try - to put balanced number of pods into each bucket. We - define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose - nodes match the node selector. e.g. If TopologyKey - is "kubernetes.io/hostname", each Node is a domain - of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", - each zone is a domain of that topology. It's a required - field. + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. 
type: string whenUnsatisfiable: - description: 'WhenUnsatisfiable indicates how to deal - with a pod if it doesn''t satisfy the spread constraint. - - DoNotSchedule (default) tells the scheduler not - to schedule it. - ScheduleAnyway tells the scheduler - to schedule the pod in any location, but giving higher - precedence to topologies that would help reduce the - skew. A constraint is considered "Unsatisfiable" for - an incoming pod if and only if every possible node - assignment for that pod would violate "MaxSkew" on - some topology. For example, in a 3-zone cluster, MaxSkew - is set to 1, and pods with the same labelSelector - spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P - | P | P | If WhenUnsatisfiable is set to DoNotSchedule, - incoming pod can only be scheduled to zone2(zone3) - to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) - satisfies MaxSkew(1). In other words, the cluster - can still be imbalanced, but scheduler won''t make - it *more* imbalanced. It''s a required field.' + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. type: string required: - maxSkew @@ -15098,36 +16866,40 @@ spec: - pgAdmin type: object users: - description: Users to create inside PostgreSQL and the databases they - should access. The default creates one user that can access one - database matching the PostgresCluster name. An empty list creates - no users. Removing a user from this list does NOT drop the user - nor revoke their access. + description: |- + Users to create inside PostgreSQL and the databases they should access. + The default creates one user that can access one database matching the + PostgresCluster name. An empty list creates no users. Removing a user + from this list does NOT drop the user nor revoke their access. items: properties: databases: - description: Databases to which this user can connect and create - objects. Removing a database from this list does NOT revoke - access. This field is ignored for the "postgres" user. + description: |- + Databases to which this user can connect and create objects. Removing a + database from this list does NOT revoke access. This field is ignored for + the "postgres" user. items: - description: 'PostgreSQL identifiers are limited in length - but may contain any character. More info: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS' + description: |- + PostgreSQL identifiers are limited in length but may contain any character. 
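
For illustration, the pgAdmin scheduling and storage fields described above map onto a PostgresCluster spec roughly as follows; this is a sketch, not a tested manifest, and the cluster name, storage size, taint key, and selector label are assumptions (backup configuration is omitted for brevity):

apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo                        # hypothetical cluster name
spec:
  postgresVersion: 16
  instances:
    - name: instance1
      dataVolumeClaimSpec:
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 1Gi
  userInterface:
    pgAdmin:
      dataVolumeClaimSpec:           # PersistentVolumeClaim for pgAdmin data, per the schema above
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 1Gi
      tolerations:                   # allow pgAdmin onto nodes with a matching taint
        - key: example.com/dedicated # hypothetical taint key
          operator: Equal
          value: pgadmin
          effect: NoSchedule
      topologySpreadConstraints:     # spread pgAdmin pods across zones
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              # assumed label; intended to match the pods of this cluster
              postgres-operator.crunchydata.com/cluster: hippo
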
+ More info: https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS maxLength: 63 minLength: 1 type: string type: array x-kubernetes-list-type: set name: - description: The name of this PostgreSQL user. The value may - contain only lowercase letters, numbers, and hyphen so that - it fits into Kubernetes metadata. + description: |- + The name of this PostgreSQL user. The value may contain only lowercase + letters, numbers, and hyphen so that it fits into Kubernetes metadata. maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ type: string options: - description: 'ALTER ROLE options except for PASSWORD. This field - is ignored for the "postgres" user. More info: https://www.postgresql.org/docs/current/role-attributes.html' + description: |- + ALTER ROLE options except for PASSWORD. This field is ignored for the + "postgres" user. + More info: https://www.postgresql.org/docs/current/role-attributes.html maxLength: 200 pattern: ^[^;]*$ type: string @@ -15141,11 +16913,11 @@ spec: properties: type: default: ASCII - description: Type of password to generate. Defaults to ASCII. - Valid options are ASCII and AlphaNumeric. "ASCII" passwords - contain letters, numbers, and symbols from the US-ASCII - character set. "AlphaNumeric" passwords contain letters - and numbers from the US-ASCII character set. + description: |- + Type of password to generate. Defaults to ASCII. Valid options are ASCII + and AlphaNumeric. + "ASCII" passwords contain letters, numbers, and symbols from the US-ASCII character set. + "AlphaNumeric" passwords contain letters and numbers from the US-ASCII character set. enum: - ASCII - AlphaNumeric @@ -15162,7 +16934,6 @@ spec: - name x-kubernetes-list-type: map required: - - backups - instances - postgresVersion type: object @@ -15170,40 +16941,40 @@ spec: description: PostgresClusterStatus defines the observed state of PostgresCluster properties: conditions: - description: 'conditions represent the observations of postgrescluster''s - current state. Known .status.conditions.type are: "PersistentVolumeResizing", - "Progressing", "ProxyAvailable"' + description: |- + conditions represent the observations of postgrescluster's current state. + Known .status.conditions.type are: "PersistentVolumeResizing", + "Progressing", "ProxyAvailable" items: description: Condition contains details for one aspect of the current state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. 
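
Similarly, the users list described above can be sketched like this; the role and database names are hypothetical, and per the field descriptions removing an entry later does not drop the role or revoke its access:

spec:
  users:
    - name: rhino                          # hypothetical role name: lowercase letters, numbers, hyphens only
      databases:
        - rhinodb                          # hypothetical database this role may connect to
      options: "NOSUPERUSER CREATEDB"      # ALTER ROLE options other than PASSWORD; no semicolons allowed
      password:
        type: AlphaNumeric                 # letters and numbers only; default is ASCII
    - name: postgres                       # databases and options are ignored for the "postgres" user
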
For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -15217,7 +16988,7 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase. + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -15244,6 +17015,11 @@ spec: description: Current state of PostgreSQL instances. items: properties: + desiredPGDataVolume: + additionalProperties: + type: string + description: Desired Size of the pgData volume + type: object name: type: string readyReplicas: @@ -15302,11 +17078,10 @@ spec: format: int32 type: integer completionTime: - description: Represents the time the manual backup Job was - determined by the Job controller to be completed. This - field is only set if the backup completed successfully. - Additionally, it is represented in RFC3339 form and is in - UTC. + description: |- + Represents the time the manual backup Job was determined by the Job controller + to be completed. This field is only set if the backup completed successfully. + Additionally, it is represented in RFC3339 form and is in UTC. format: date-time type: string failed: @@ -15315,18 +17090,19 @@ spec: format: int32 type: integer finished: - description: Specifies whether or not the Job is finished - executing (does not indicate success or failure). + description: |- + Specifies whether or not the Job is finished executing (does not indicate success or + failure). type: boolean id: - description: A unique identifier for the manual backup as - provided using the "pgbackrest-backup" annotation when initiating - a backup. + description: |- + A unique identifier for the manual backup as provided using the "pgbackrest-backup" + annotation when initiating a backup. type: string startTime: - description: Represents the time the manual backup Job was - acknowledged by the Job controller. It is represented in - RFC3339 form and is in UTC. + description: |- + Represents the time the manual backup Job was acknowledged by the Job controller. + It is represented in RFC3339 form and is in UTC. 
format: date-time type: string succeeded: @@ -15343,16 +17119,19 @@ spec: host properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST - resource this object represents. Servers may infer this - from the endpoint the client submits requests to. Cannot - be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string ready: description: Whether or not the pgBackRest repository host @@ -15372,14 +17151,14 @@ spec: description: The name of the pgBackRest repository type: string replicaCreateBackupComplete: - description: ReplicaCreateBackupReady indicates whether - a backup exists in the repository as needed to bootstrap - replicas. + description: |- + ReplicaCreateBackupReady indicates whether a backup exists in the repository as needed + to bootstrap replicas. type: boolean repoOptionsHash: - description: A hash of the required fields in the spec for - defining an Azure, GCS or S3 repository, Utilized to detect - changes to these fields and then execute pgBackRest stanza-create + description: |- + A hash of the required fields in the spec for defining an Azure, GCS or S3 repository, + Utilized to detect changes to these fields and then execute pgBackRest stanza-create commands accordingly. type: string stanzaCreated: @@ -15406,11 +17185,10 @@ spec: format: int32 type: integer completionTime: - description: Represents the time the manual backup Job was - determined by the Job controller to be completed. This - field is only set if the backup completed successfully. - Additionally, it is represented in RFC3339 form and is in - UTC. + description: |- + Represents the time the manual backup Job was determined by the Job controller + to be completed. This field is only set if the backup completed successfully. + Additionally, it is represented in RFC3339 form and is in UTC. format: date-time type: string failed: @@ -15419,18 +17197,19 @@ spec: format: int32 type: integer finished: - description: Specifies whether or not the Job is finished - executing (does not indicate success or failure). + description: |- + Specifies whether or not the Job is finished executing (does not indicate success or + failure). type: boolean id: - description: A unique identifier for the manual backup as - provided using the "pgbackrest-backup" annotation when initiating - a backup. + description: |- + A unique identifier for the manual backup as provided using the "pgbackrest-backup" + annotation when initiating a backup. 
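
As a sketch of how the manual backup status above gets populated: a one-off backup is typically requested by annotating the PostgresCluster while spec.backups.pgbackrest.manual names the target repository. The full annotation key and the manual options shown here are assumptions based on the "pgbackrest-backup" annotation referenced in the field descriptions; per those descriptions, the annotation value is what the status reports back as "id":

metadata:
  annotations:
    # assumed full annotation key; its value is surfaced as the manual backup "id" in status
    postgres-operator.crunchydata.com/pgbackrest-backup: manual-backup-2024-10-01
spec:
  backups:
    pgbackrest:
      manual:
        repoName: repo1        # repository the manual backup targets
        options:
          - --type=full        # assumed pgBackRest option passthrough
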
type: string startTime: - description: Represents the time the manual backup Job was - acknowledged by the Job controller. It is represented in - RFC3339 form and is in UTC. + description: |- + Represents the time the manual backup Job was acknowledged by the Job controller. + It is represented in RFC3339 form and is in UTC. format: date-time type: string succeeded: @@ -15452,11 +17231,10 @@ spec: format: int32 type: integer completionTime: - description: Represents the time the manual backup Job was - determined by the Job controller to be completed. This - field is only set if the backup completed successfully. - Additionally, it is represented in RFC3339 form and is - in UTC. + description: |- + Represents the time the manual backup Job was determined by the Job controller + to be completed. This field is only set if the backup completed successfully. + Additionally, it is represented in RFC3339 form and is in UTC. format: date-time type: string cronJobName: @@ -15472,9 +17250,9 @@ spec: description: The name of the associated pgBackRest repository type: string startTime: - description: Represents the time the manual backup Job was - acknowledged by the Job controller. It is represented - in RFC3339 form and is in UTC. + description: |- + Represents the time the manual backup Job was acknowledged by the Job controller. + It is represented in RFC3339 form and is in UTC. format: date-time type: string succeeded: @@ -15489,8 +17267,9 @@ spec: type: array type: object postgresVersion: - description: Stores the current PostgreSQL major version following - a successful major PostgreSQL upgrade. + description: |- + Stores the current PostgreSQL major version following a successful + major PostgreSQL upgrade. type: integer proxy: description: Current state of the PostgreSQL proxy. @@ -15498,8 +17277,9 @@ spec: pgBouncer: properties: postgresRevision: - description: Identifies the revision of PgBouncer assets that - have been installed into PostgreSQL. + description: |- + Identifies the revision of PgBouncer assets that have been installed into + PostgreSQL. type: string readyReplicas: description: Total number of ready pods. @@ -15517,8 +17297,9 @@ spec: type: string type: object startupInstance: - description: The instance that should be started first when bootstrapping - and/or starting a PostgresCluster. + description: |- + The instance that should be started first when bootstrapping and/or starting a + PostgresCluster. 
type: string startupInstanceSet: description: The instance set associated with the startupInstance diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index e2625322ae..85b7cbdf29 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -1,4 +1,3 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: @@ -6,3 +5,13 @@ resources: - bases/postgres-operator.crunchydata.com_postgresclusters.yaml - bases/postgres-operator.crunchydata.com_pgupgrades.yaml - bases/postgres-operator.crunchydata.com_pgadmins.yaml + +patches: +- target: + kind: CustomResourceDefinition + patch: |- + - op: add + path: /metadata/labels + value: + app.kubernetes.io/name: pgo + app.kubernetes.io/version: latest diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 82b2310ca0..7001380693 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -11,7 +11,7 @@ labels: resources: - ../crd -- ../rbac/cluster +- ../rbac - ../manager images: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 4a4d3ec5d4..2eb849e138 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -12,34 +12,38 @@ spec: - name: operator image: postgres-operator env: + - name: PGO_INSTALLER + value: kustomize + - name: PGO_INSTALLER_ORIGIN + value: postgres-operator-repo - name: PGO_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: CRUNCHY_DEBUG value: "true" - - name: RELATED_IMAGE_POSTGRES_15 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.7-0" - - name: RELATED_IMAGE_POSTGRES_15_GIS_3.3 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.7-3.3-0" - name: RELATED_IMAGE_POSTGRES_16 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-0" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.4-2" - name: RELATED_IMAGE_POSTGRES_16_GIS_3.3 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.3-0" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.3-2" - name: RELATED_IMAGE_POSTGRES_16_GIS_3.4 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.4-0" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.4-3.4-2" + - name: RELATED_IMAGE_POSTGRES_17 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.0-0" + - name: RELATED_IMAGE_POSTGRES_17_GIS_3.4 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.0-3.4-0" - name: RELATED_IMAGE_PGADMIN - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-25" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-31" - name: RELATED_IMAGE_PGBACKREST - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-0" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.53.1-0" - name: RELATED_IMAGE_PGBOUNCER - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-0" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.23-0" - name: RELATED_IMAGE_PGEXPORTER value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest" - name: RELATED_IMAGE_PGUPGRADE value: 
"registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest" - name: RELATED_IMAGE_STANDALONE_PGADMIN - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.6-0" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.12-0" securityContext: allowPrivilegeEscalation: false capabilities: { drop: [ALL] } diff --git a/config/rbac/.gitignore b/config/rbac/.gitignore deleted file mode 100644 index 2ad5901955..0000000000 --- a/config/rbac/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/role.yaml diff --git a/config/rbac/cluster/kustomization.yaml b/config/rbac/kustomization.yaml similarity index 100% rename from config/rbac/cluster/kustomization.yaml rename to config/rbac/kustomization.yaml diff --git a/config/rbac/namespace/kustomization.yaml b/config/rbac/namespace/kustomization.yaml deleted file mode 100644 index 82cfb0841b..0000000000 --- a/config/rbac/namespace/kustomization.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- service_account.yaml -- role.yaml -- role_binding.yaml diff --git a/config/rbac/namespace/role.yaml b/config/rbac/namespace/role.yaml deleted file mode 100644 index 06771d13a5..0000000000 --- a/config/rbac/namespace/role.yaml +++ /dev/null @@ -1,164 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: postgres-operator -rules: -- apiGroups: - - '' - resources: - - configmaps - - persistentvolumeclaims - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - '' - resources: - - endpoints - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - watch -- apiGroups: - - '' - resources: - - endpoints/restricted - - pods/exec - verbs: - - create -- apiGroups: - - '' - resources: - - events - verbs: - - create - - patch -- apiGroups: - - '' - resources: - - pods - verbs: - - delete - - get - - list - - patch - - watch -- apiGroups: - - '' - resources: - - serviceaccounts - verbs: - - create - - get - - list - - patch - - watch -- apiGroups: - - apps - resources: - - deployments - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - crunchybridgeclusters - verbs: - - get - - list - - patch - - update - - watch -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - crunchybridgeclusters/finalizers - - crunchybridgeclusters/status - verbs: - - patch - - update -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - pgadmins - - pgupgrades - verbs: - - get - - list - - watch -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - pgadmins/finalizers - - pgupgrades/finalizers - - postgresclusters/finalizers - verbs: - - update -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - pgadmins/status - - pgupgrades/status - - postgresclusters/status - verbs: - - patch -- apiGroups: - - postgres-operator.crunchydata.com - resources: - - postgresclusters - verbs: - - get - - list - - patch - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - - roles - verbs: - - create - - get - - list - - patch - - watch diff --git 
a/config/rbac/namespace/role_binding.yaml b/config/rbac/namespace/role_binding.yaml deleted file mode 100644 index d7c16c8a5b..0000000000 --- a/config/rbac/namespace/role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: postgres-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: postgres-operator -subjects: -- kind: ServiceAccount - name: pgo diff --git a/config/rbac/namespace/service_account.yaml b/config/rbac/namespace/service_account.yaml deleted file mode 100644 index 364f797171..0000000000 --- a/config/rbac/namespace/service_account.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: pgo diff --git a/config/rbac/cluster/role.yaml b/config/rbac/role.yaml similarity index 90% rename from config/rbac/cluster/role.yaml rename to config/rbac/role.yaml index b3c7218e1f..d5783d00b1 100644 --- a/config/rbac/cluster/role.yaml +++ b/config/rbac/role.yaml @@ -5,11 +5,12 @@ metadata: name: postgres-operator rules: - apiGroups: - - '' + - "" resources: - configmaps - persistentvolumeclaims - secrets + - serviceaccounts - services verbs: - create @@ -19,7 +20,7 @@ rules: - patch - watch - apiGroups: - - '' + - "" resources: - endpoints verbs: @@ -31,21 +32,21 @@ rules: - patch - watch - apiGroups: - - '' + - "" resources: - endpoints/restricted - pods/exec verbs: - create - apiGroups: - - '' + - "" resources: - events verbs: - create - patch - apiGroups: - - '' + - "" resources: - pods verbs: @@ -54,16 +55,6 @@ rules: - list - patch - watch -- apiGroups: - - '' - resources: - - serviceaccounts - verbs: - - create - - get - - list - - patch - - watch - apiGroups: - apps resources: @@ -88,6 +79,15 @@ rules: - list - patch - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - watch - apiGroups: - policy resources: @@ -158,6 +158,18 @@ rules: - roles verbs: - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - delete - get - list - patch diff --git a/config/rbac/cluster/role_binding.yaml b/config/rbac/role_binding.yaml similarity index 100% rename from config/rbac/cluster/role_binding.yaml rename to config/rbac/role_binding.yaml diff --git a/config/rbac/cluster/service_account.yaml b/config/rbac/service_account.yaml similarity index 100% rename from config/rbac/cluster/service_account.yaml rename to config/rbac/service_account.yaml diff --git a/config/singlenamespace/kustomization.yaml b/config/singlenamespace/kustomization.yaml deleted file mode 100644 index a6dc8de538..0000000000 --- a/config/singlenamespace/kustomization.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: postgres-operator - -labels: -- includeSelectors: true - pairs: - postgres-operator.crunchydata.com/control-plane: postgres-operator - -resources: -- ../crd -- ../rbac/namespace -- ../manager - -images: -- name: postgres-operator - newName: registry.developers.crunchydata.com/crunchydata/postgres-operator - newTag: latest - -patches: -- path: manager-target.yaml diff --git a/config/singlenamespace/manager-target.yaml b/config/singlenamespace/manager-target.yaml deleted file mode 100644 index 949250e264..0000000000 --- a/config/singlenamespace/manager-target.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo 
-spec: - template: - spec: - containers: - - name: operator - env: - - name: PGO_TARGET_NAMESPACE - valueFrom: { fieldRef: { apiVersion: v1, fieldPath: metadata.namespace } } diff --git a/examples/postgrescluster/postgrescluster.yaml b/examples/postgrescluster/postgrescluster.yaml index dc71573638..75756af94e 100644 --- a/examples/postgrescluster/postgrescluster.yaml +++ b/examples/postgrescluster/postgrescluster.yaml @@ -3,7 +3,6 @@ kind: PostgresCluster metadata: name: hippo spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-0 postgresVersion: 16 instances: - name: instance1 @@ -15,7 +14,6 @@ spec: storage: 1Gi backups: pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-0 repos: - name: repo1 volume: @@ -34,5 +32,4 @@ spec: requests: storage: 1Gi proxy: - pgBouncer: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-0 + pgBouncer: {} diff --git a/go.mod b/go.mod index 0cc542568f..d268d66018 100644 --- a/go.mod +++ b/go.mod @@ -1,95 +1,96 @@ module github.com/crunchydata/postgres-operator -go 1.19 +go 1.22.0 require ( - github.com/evanphx/json-patch/v5 v5.6.0 - github.com/go-logr/logr v1.3.0 + github.com/go-logr/logr v1.4.2 github.com/golang-jwt/jwt/v5 v5.2.1 - github.com/google/go-cmp v0.5.9 - github.com/google/uuid v1.3.1 - github.com/onsi/ginkgo/v2 v2.0.0 - github.com/onsi/gomega v1.18.1 + github.com/google/go-cmp v0.6.0 + github.com/google/uuid v1.6.0 + github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 + github.com/onsi/ginkgo/v2 v2.17.2 + github.com/onsi/gomega v1.33.1 github.com/pganalyze/pg_query_go/v5 v5.1.0 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/xdg-go/stringprep v1.0.2 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 - go.opentelemetry.io/otel v1.19.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.2.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 + go.opentelemetry.io/otel v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0 - go.opentelemetry.io/otel/sdk v1.2.0 - go.opentelemetry.io/otel/trace v1.19.0 - golang.org/x/crypto v0.22.0 + go.opentelemetry.io/otel/sdk v1.27.0 + go.opentelemetry.io/otel/trace v1.27.0 + golang.org/x/crypto v0.27.0 gotest.tools/v3 v3.1.0 - k8s.io/api v0.24.2 - k8s.io/apimachinery v0.24.2 - k8s.io/client-go v0.24.2 - k8s.io/component-base v0.24.2 - sigs.k8s.io/controller-runtime v0.12.3 - sigs.k8s.io/yaml v1.3.0 + k8s.io/api v0.30.2 + k8s.io/apimachinery v0.30.2 + k8s.io/client-go v0.30.2 + k8s.io/component-base v0.30.2 + sigs.k8s.io/controller-runtime v0.18.4 + sigs.k8s.io/yaml v1.4.0 ) require ( - cloud.google.com/go/compute v1.23.2 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.1.1 
// indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful v2.16.0+incompatible // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/gofuzz v1.1.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/prometheus/client_golang v1.12.2 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.54.0 // indirect 
+ github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/proto/otlp v0.10.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.22.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.2 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.24.2 // indirect - k8s.io/klog/v2 v2.60.1 // indirect - k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect - sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + k8s.io/apiextensions-apiserver v0.30.2 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect + k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index a926cb9464..aed2056f6f 100644 --- a/go.sum +++ b/go.sum @@ -1,1042 +1,251 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod 
h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.2 h1:nWEMDhgbBkBJjfpVySqU4jgWdc22PLR0o4vEexZHers= -cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= 
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags 
v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM= -github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig 
v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod 
h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns 
v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 h1:mjQG0Vakr2h246kEDR85U8y8ZhPgT3bguTCajRa/jaw= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate 
v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/pganalyze/pg_query_go/v5 v5.1.0 h1:MlxQqHZnvA3cbRQYyIrjxEjzo560P6MyTgtlaf3pmXg= github.com/pganalyze/pg_query_go/v5 v5.1.0/go.mod h1:FsglvxidZsVN+Ltw3Ai6nTgPVcK2BPukH3jCDEqc1Ug= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= 
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark 
v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0 h1:xzbcGykysUh776gzD1LUPsNNHKWN0kQWDnJhn1ddUuk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0/go.mod h1:14T5gr+Y6s2AgHPqBMgnGwp04csUjQmYXFWPeiBoq5s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.2.0 h1:j/jXNzS6Dy0DFgO/oyCvin4H7vTQBg2Vdi6idIzWhCI= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.2.0/go.mod h1:k5GnE4m4Jyy2DNh6UAzG6Nml51nuqQyszV7O1ksQAnE= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0 h1:OiYdrCq1Ctwnovp6EofSPwlp5aGy4LgKNbkg7PtEUw8= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0/go.mod h1:DUFCmFkXr0VtAHl5Zq2JRx24G6ze5CAq8YfdD36RdX8= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.10.0 h1:n7brgtEbDvXEgGyKKo8SobKT1e9FewlDtXzkVP5djoE= -go.opentelemetry.io/proto/otlp v0.10.0/go.mod h1:zG20xCK0szZ1xdokeSOwEcmlXu+x9kkdRe6N1DhKcfU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.10.0/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM= +golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.29.0 
h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U= -google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod 
h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 h1:QW9+G6Fir4VcRXVH8x3LilNAb6cxBGLa6+GM4hRwexE= +google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= 
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools 
v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= -k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= -k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= -k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= -k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= -k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= -sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml 
v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI= +k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI= +k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE= +k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw= +k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg= +k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50= +k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs= +k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII= +k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA= +k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 2c973beb91..7fc3d63c10 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,15 +1,3 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 diff --git a/hack/create-todo-patch.sh b/hack/create-todo-patch.sh deleted file mode 100755 index 7aab184a3a..0000000000 --- a/hack/create-todo-patch.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
-# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -directory=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -clusters_dir="${directory}/../build/crd/postgresclusters" -upgrades_dir="${directory}/../build/crd/pgupgrades" - -# Generate a Kustomize patch file for removing any TODOs we inherit from the Kubernetes API. -# Right now there is one TODO in our CRD. This script focuses on removing the specific TODO -# anywhere they are found in the CRD. - -# The TODO comes from the following: -# https://github.com/kubernetes/api/blob/25b7aa9e86de7bba38c35cbe56701d2c1ff207e9/core/v1/types.go#L5609 -# Additionally, the hope is that this step can be removed once the following issue is addressed -# in the kubebuilder controller-tools project: -# https://github.com/kubernetes-sigs/controller-tools/issues/649 - -echo "Generating Kustomize patch file for removing Kube API TODOs" - -# Get the description of the "name" field with the TODO from any place it is used in the CRD and -# store it in a variable. Then, create another variable with the TODO stripped out. -name_desc_with_todo=$( - python3 -m yq -r \ - .spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.customTLSSecret.properties.name.description \ - "${clusters_dir}/generated/postgres-operator.crunchydata.com_postgresclusters.yaml" -) -name_desc_without_todo=$(sed 's/ TODO.*//g' <<< "${name_desc_with_todo}") - -# Generate a JSON patch file to update the "name" description for all applicable paths in the CRD. -python3 -m yq -y --arg old "${name_desc_with_todo}" --arg new "${name_desc_without_todo}" ' - [{ op: "add", path: "/work", value: $new }] + - [paths(select(. == $old)) | { op: "copy", from: "/work", path: "/\(map(tostring) | join("/"))" }] + - [{ op: "remove", path: "/work" }] -' \ - "${clusters_dir}/generated/postgres-operator.crunchydata.com_postgresclusters.yaml" > "${clusters_dir}/todos.yaml" - -python3 -m yq -y --arg old "${name_desc_with_todo}" --arg new "${name_desc_without_todo}" ' - [{ op: "add", path: "/work", value: $new }] + - [paths(select(. == $old)) | { op: "copy", from: "/work", path: "/\(map(tostring) | join("/"))" }] + - [{ op: "remove", path: "/work" }] -' \ - "${upgrades_dir}/generated/postgres-operator.crunchydata.com_pgupgrades.yaml" > "${upgrades_dir}/todos.yaml" diff --git a/hack/generate-rbac.sh b/hack/generate-rbac.sh deleted file mode 100755 index 4ad430a5e9..0000000000 --- a/hack/generate-rbac.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu - -declare -r directory="$1" - -# NOTE(cbandy): `kustomize` v4.1 and `kubectl` v1.22 will be able to change the -# kind of a resource: https://pr.k8s.io/101120 -ruby -r 'set' -r 'yaml' -e ' -directory = ARGV[0] -roles = YAML.load_stream(IO.read(File.join(directory, "role.yaml"))) -operator = roles.shift - -abort "Expected the operator ClusterRole first!" unless operator and operator["kind"] == "ClusterRole" - -# The client used by the controller sets up a cache and an informer for any GVK -# that it GETs. That informer needs the "watch" permission. -# - https://github.com/kubernetes-sigs/controller-runtime/issues/1249 -# - https://github.com/kubernetes-sigs/controller-runtime/issues/1454 -# TODO(cbandy): Move this into an RBAC marker when it can be configured on the Manager. -operator["rules"].each do |rule| - verbs = rule["verbs"].to_set - rule["verbs"] = verbs.add("watch").sort if verbs.intersect? Set["get", "list"] -end - -# Combine the other parsed Roles into the ClusterRole. -rules = operator["rules"] + roles.flat_map { |role| role["rules"] } -rules = rules. - group_by { |rule| rule.slice("apiGroups", "resources") }. - map do |(group_resource, rules)| - verbs = rules.flat_map { |rule| rule["verbs"] }.to_set.sort - group_resource.merge("verbs" => verbs) - end -operator["rules"] = rules.sort_by { |rule| rule.to_a } - -# Combine resources that have the same verbs. -rules = operator["rules"]. - group_by { |rule| rule.slice("apiGroups", "verbs") }. - map do |(group_verb, rules)| - resources = rules.flat_map { |rule| rule["resources"] }.to_set.sort - rule = group_verb.merge("resources" => resources) - rule.slice("apiGroups", "resources", "verbs") # keep the keys in order - end -operator["rules"] = rules.sort_by { |rule| rule.to_a } - -operator["metadata"] = { "name" => "postgres-operator" } -IO.write(File.join(directory, "cluster", "role.yaml"), YAML.dump(operator)) - -operator["kind"] = "Role" -IO.write(File.join(directory, "namespace", "role.yaml"), YAML.dump(operator)) -' -- "${directory}" diff --git a/installers/olm/.gitignore b/installers/olm/.gitignore deleted file mode 100644 index a2d12b4ff2..0000000000 --- a/installers/olm/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/bundles/ -/projects/ -/tools/ -/config/marketplace diff --git a/installers/olm/Makefile b/installers/olm/Makefile deleted file mode 100644 index 9784d352cf..0000000000 --- a/installers/olm/Makefile +++ /dev/null @@ -1,112 +0,0 @@ -.DEFAULT_GOAL := help -.SUFFIXES: - -CONTAINER ?= docker -PGO_VERSION ?= latest -REPLACES_VERSION ?= 5.x.y - -OS_KERNEL ?= $(shell bash -c 'echo $${1,,}' - `uname -s`) -OS_MACHINE ?= $(shell bash -c 'echo $${1/x86_/amd}' - `uname -m`) -SYSTEM = $(OS_KERNEL)-$(OS_MACHINE) - -export PATH := $(CURDIR)/tools/$(SYSTEM):$(PATH) - -export PGO_VERSION - -export REPLACES_VERSION - -distros = community redhat marketplace - -.PHONY: bundles -bundles: ## Build OLM bundles -bundles: $(distros:%=bundles/%) - -# https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/#validating-your-bundle -# https://github.com/operator-framework/community-operators/blob/8a36a33/docs/packaging-required-criteria-ocp.md -.PHONY: bundles/community -bundles/community: - ./generate.sh community - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - env operator-sdk bundle validate $@ --select-optional='name=community' 
--optional-values='index-path=$@/Dockerfile' - -# https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/reviewing-your-metadata-bundle -.PHONY: bundles/redhat -bundles/redhat: - ./generate.sh redhat - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - -# The 'marketplace' configuration is currently identical to the 'redhat', so we just copy it here. -.PHONY: bundles/marketplace -bundles/marketplace: - cp -r ./config/redhat/ ./config/marketplace - ./generate.sh marketplace - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - -.PHONY: clean -clean: clean-deprecated -clean: ## Remove generated files and downloaded tools - rm -rf ./bundles ./projects ./tools ./config/marketplace - -.PHONY: clean-deprecated -clean-deprecated: - rm -rf ./package - -.PHONY: help -help: ALIGN=18 -help: ## Print this message - @awk -F ': ## ' -- "/^[^':]+: ## /"' { printf "'$$(tput bold)'%-$(ALIGN)s'$$(tput sgr0)' %s\n", $$1, $$2 }' $(MAKEFILE_LIST) - -.PHONY: install-olm -install-olm: ## Install OLM in Kubernetes - env operator-sdk olm install - -.PHONY: tools -tools: ## Download tools needed to build bundles - -tools: tools/$(SYSTEM)/jq -tools/$(SYSTEM)/jq: - install -d '$(dir $@)' - curl -fSL -o '$@' "https://github.com/jqlang/jq/releases/download/jq-1.6/jq-$$(SYSTEM='$(SYSTEM)'; \ - case "$$SYSTEM" in \ - (linux-*) echo "$${SYSTEM/-amd/}";; (darwin-*) echo "$${SYSTEM/darwin/osx}";; (*) echo '$(SYSTEM)';; \ - esac)" - chmod u+x '$@' - -tools: tools/$(SYSTEM)/kubectl -tools/$(SYSTEM)/kubectl: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://dl.k8s.io/release/$(shell curl -Ls https://dl.k8s.io/release/stable-1.21.txt)/bin/$(OS_KERNEL)/$(OS_MACHINE)/kubectl' - chmod u+x '$@' - -# quay.io/operator-framework/operator-sdk -tools: tools/$(SYSTEM)/operator-sdk -tools/$(SYSTEM)/operator-sdk: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://github.com/operator-framework/operator-sdk/releases/download/v1.18.0/operator-sdk_$(OS_KERNEL)_$(OS_MACHINE)' - chmod u+x '$@' - -tools: tools/$(SYSTEM)/opm -tools/$(SYSTEM)/opm: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://github.com/operator-framework/operator-registry/releases/download/v1.33.0/$(OS_KERNEL)-$(OS_MACHINE)-opm' - chmod u+x '$@' - -tools/$(SYSTEM)/venv: - install -d '$(dir $@)' - python3 -m venv '$@' - -tools: tools/$(SYSTEM)/yq -tools/$(SYSTEM)/yq: | tools/$(SYSTEM)/venv - 'tools/$(SYSTEM)/venv/bin/python' -m pip install yq - cd '$(dir $@)' && ln -s venv/bin/yq - -.PHONY: validate-bundles -validate-bundles: ## Build temporary bundle images and run scorecard tests in Kubernetes -validate-bundles: $(distros:%=validate-%-image) -validate-bundles: $(distros:%=validate-%-directory) - -validate-%-directory: - ./validate-directory.sh 'bundles/$*' - -validate-%-image: - ./validate-image.sh '$(CONTAINER)' 'bundles/$*' diff --git a/installers/olm/README.md b/installers/olm/README.md deleted file mode 100644 index e067c86b39..0000000000 --- a/installers/olm/README.md +++ /dev/null @@ -1,147 +0,0 @@ -This directory contains the files that are used to install [Crunchy PostgreSQL for Kubernetes][hub-listing], -which includes PGO, the Postgres Operator from [Crunchy Data][], using [Operator Lifecycle Manager][OLM]. - -The integration centers around a [ClusterServiceVersion][olm-csv] [manifest](./bundle.csv.yaml) -that gets packaged for OperatorHub. Changes there are accepted only if they pass all the [scorecard][] -tests. 
Consult the [technical requirements][hub-contrib] when making changes. - - - -[Crunchy Data]: https://www.crunchydata.com -[hub-contrib]: https://operator-framework.github.io/community-operators/packaging-operator/ -[hub-listing]: https://operatorhub.io/operator/postgresql -[OLM]: https://github.com/operator-framework/operator-lifecycle-manager -[olm-csv]: https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md -[scorecard]: https://sdk.operatorframework.io/docs/testing-operators/scorecard/ - -[Red Hat Container Certification]: https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/ -[Red Hat Operator Certification]: https://redhat-connect.gitbook.io/certified-operator-guide/ - - - -## Notes - -### v5 Versions per Repository - -Community: https://github.com/k8s-operatorhub/community-operators/tree/main/operators/postgresql - -5.0.2 -5.0.3 -5.0.4 -5.0.5 -5.1.0 - -Community Prod: https://github.com/redhat-openshift-ecosystem/community-operators-prod/tree/main/operators/postgresql - -5.0.2 -5.0.3 -5.0.4 -5.0.5 -5.1.0 - -Certified: https://github.com/redhat-openshift-ecosystem/certified-operators/tree/main/operators/crunchy-postgres-operator - -5.0.4 -5.0.5 -5.1.0 - -Marketplace: https://github.com/redhat-openshift-ecosystem/redhat-marketplace-operators/tree/main/operators/crunchy-postgres-operator-rhmp - -5.0.4 -5.0.5 -5.1.0 - -### Issues Encountered - -We hit various issues with 5.1.0 where the 'replaces' name, set in the clusterserviceversion.yaml, didn't match the -expected names found for all indexes. Previously, we set the 'com.redhat.openshift.versions' annotation to "v4.6-v4.9". -The goal for this setting was to limit the upper bound of supported versions for a particular PGO release. -The problem with this was that, at the time of the 5.1.0 release, OCP 4.10 had just been released. This meant that the -5.0.5 bundle did not exist in the OCP 4.10 index. The solution presented by Red Hat was to use the 'skips' clause for -the 5.1.0 release to remedy the immediate problem, but then go back to using an unbounded setting for subsequent -releases. - -For the certified, marketplace and community repositories, this strategy of using 'skips' instead of 'replaces' worked as -expected. However, for the production community operator bundle, we were seeing a failure that required adding an -additional 'replaces' value of 5.0.4 in addition to the 5.0.5 'skips' value. While this allowed the PR to merge, it -seems at odds with the behavior at the other repos. - -For more information on the use of 'skips' and 'replaces', please see: -https://olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/ - - -Another version issue encountered was related to our attempt to both support OCP v4.6 (which is an Extended Update -Support (EUS) release) while also limiting Kubernetes to 1.20+. The issue with this is that OCP 4.6 utilizes k8s 1.19 -and the kube minversion validation was in fact limiting the OCP version as well. Our hope was that those settings would -be treated independently, but that was unfortunately not the case. The fix for this was to move this kube version to -1.19, despite 1.19 having been released in the third quarter of 2020 with one year of patch support.
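Returning to the 'skips' and 'replaces' behavior described above, the minimal sketch below shows where both fields live in a ClusterServiceVersion. The CSV names and version numbers are illustrative assumptions only, not values taken from any published bundle.

```yaml
# Sketch only: illustrative CSV names and versions, not a published bundle.
apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
metadata:
  name: postgresoperator.v5.1.0
spec:
  version: 5.1.0
  # 'replaces' names a single direct predecessor; that CSV must be present
  # in every index that carries this bundle.
  replaces: postgresoperator.v5.0.4
  # 'skips' lets the update graph step over bundles that are absent from
  # some index (for example, a 5.0.5 bundle missing from the OCP 4.10 index).
  skips:
    - postgresoperator.v5.0.5
```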
- -Following the lessons learned above, when bumping the OpenShift supported version from v4.6 to v4.8, we will similarly -keep the matching minimum Kubernetes version, i.e. 1.21. -https://access.redhat.com/solutions/4870701 - -## Testing - -### Setup - -```sh -make tools -``` - -### Testing - -```sh -make bundles validate-bundles -``` - -Previously, the 'validate_bundle_image' function in validate-bundles.sh ended -with the following command: - -```sh - # Create an index database from the bundle image. - "${opm[@]}" index add --bundles="${image}" --generate - - # drwxr-xr-x. 2 user user 22 database - # -rw-r--r--. 1 user user 286720 database/index.db - # -rw-r--r--. 1 user user 267 index.Dockerfile -``` - -This command was used to generate the updated registry database, but this step -is no longer required when validating the OLM bundles. -- https://github.com/operator-framework/operator-registry/blob/master/docs/design/opm-tooling.md#add-1 - -```sh -BUNDLE_DIRECTORY='bundles/community' -BUNDLE_IMAGE='gcr.io/.../postgres-operator-bundle:latest' -INDEX_IMAGE='gcr.io/.../postgres-operator-bundle-index:latest' -NAMESPACE='pgo' - -docker build --tag "$BUNDLE_IMAGE" "$BUNDLE_DIRECTORY" -docker push "$BUNDLE_IMAGE" - -opm index add --bundles "$BUNDLE_IMAGE" --tag "$INDEX_IMAGE" --container-tool=docker -docker push "$INDEX_IMAGE" - -./install.sh operator "$BUNDLE_DIRECTORY" "$INDEX_IMAGE" "$NAMESPACE" "$NAMESPACE" - -# Cleanup -operator-sdk cleanup postgresql --namespace="$NAMESPACE" -kubectl -n "$NAMESPACE" delete operatorgroup olm-operator-group -``` - -### Post Bundle Generation - -After generating and testing the OLM bundles, there are two manual steps. - -1. Update the image SHA values (denoted with '', required for both the Red Hat 'Certified' and -'Marketplace' bundles) -2. Update the 'description.md' file to indicate which OCP versions this release of PGO was tested against. - -### Troubleshooting - -If, when running `make validate-bundles`, you encounter an error similar to - -`cannot find Containerfile or Dockerfile in context directory: stat /mnt/Dockerfile: permission denied` - -the target command is likely being blocked by SELinux and you will need to adjust -your settings accordingly. diff --git a/installers/olm/bundle.Dockerfile b/installers/olm/bundle.Dockerfile deleted file mode 100644 index a81d16f73e..0000000000 --- a/installers/olm/bundle.Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# Used to build the bundle image. This file is ignored by the community operator -# registries which work with bundle directories instead. 
-# https://operator-framework.github.io/community-operators/packaging-operator/ - -FROM scratch AS builder - -COPY manifests/ /build/manifests/ -COPY metadata/ /build/metadata/ -COPY tests/ /build/tests - - -FROM scratch - -# ANNOTATIONS is replaced with bundle.annotations.yaml -LABEL \ - ${ANNOTATIONS} - -COPY --from=builder /build/ / diff --git a/installers/olm/bundle.annotations.yaml b/installers/olm/bundle.annotations.yaml deleted file mode 100644 index 27dce5aa07..0000000000 --- a/installers/olm/bundle.annotations.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -annotations: - # https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/ - # https://docs.openshift.com/container-platform/4.7/operators/understanding/olm-packaging-format.html - operators.operatorframework.io.bundle.mediatype.v1: registry+v1 - operators.operatorframework.io.bundle.manifests.v1: manifests/ - operators.operatorframework.io.bundle.metadata.v1: metadata/ - - operators.operatorframework.io.test.mediatype.v1: scorecard+v1 - operators.operatorframework.io.test.config.v1: tests/scorecard/ - - # "package.v1" is the name of the PackageManifest. It also determines the URL - # of the details page at OperatorHub.io; "postgresql" here becomes: - # https://operatorhub.io/operator/postgresql - # - # A package consists of multiple bundles (versions) arranged into channels. - # https://olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/ - operators.operatorframework.io.bundle.package.v1: '' # generate.sh - - # "channels.v1" is the comma-separated list of channels from which this bundle - # can be installed. - # - # "channel.default.v1" is the default channel of the PackageManifest. It is - # the first channel presented, the first used to satisfy dependencies, and - # the one used by a Subscription that does not specify a channel. OLM uses - # the value from the bundle with the highest semantic version. - # - # https://olm.operatorframework.io/docs/best-practices/channel-naming/ - operators.operatorframework.io.bundle.channels.v1: v5 - operators.operatorframework.io.bundle.channel.default.v1: v5 - - # OpenShift v4.9 is the lowest version supported for v5.3.0+. - # https://github.com/operator-framework/community-operators/blob/8a36a33/docs/packaging-required-criteria-ocp.md - # https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/bundle-directory - com.redhat.delivery.operator.bundle: true - com.redhat.openshift.versions: 'v4.10' - -... 
diff --git a/installers/olm/bundle.csv.yaml b/installers/olm/bundle.csv.yaml deleted file mode 100644 index 600f8b1bc0..0000000000 --- a/installers/olm/bundle.csv.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# https://olm.operatorframework.io/docs/concepts/crds/clusterserviceversion/ -# https://docs.openshift.com/container-platform/4.7/operators/operator_sdk/osdk-generating-csvs.html -# https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/creating-the-csv -# https://pkg.go.dev/github.com/operator-framework/api@v0.10.1/pkg/operators/v1alpha1#ClusterServiceVersion - -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - name: '' # generate.sh - annotations: - support: crunchydata.com - olm.properties: '[]' - - # The following affect how the package is indexed at OperatorHub.io: - # https://operatorhub.io/?category=Database - # https://sdk.operatorframework.io/docs/advanced-topics/operator-capabilities/operator-capabilities/ - categories: Database - capabilities: Auto Pilot - description: Production Postgres Made Easy - - # The following appear on the details page at OperatorHub.io: - # https://operatorhub.io/operator/postgresql - createdAt: 2019-12-31 19:40Z - repository: https://github.com/CrunchyData/postgres-operator - containerImage: # kustomize config/operator - alm-examples: |- # kustomize config/examples - -spec: - # The following affect how the package is indexed at OperatorHub.io: - # https://operatorhub.io/ - displayName: Crunchy Postgres for Kubernetes - provider: - # These values become labels on the PackageManifest. - name: Crunchy Data - url: https://www.crunchydata.com/ - keywords: - - postgres - - postgresql - - database - - sql - - operator - - crunchy data - - # The following appear on the details page at OperatorHub.io: - # https://operatorhub.io/operator/postgresql - description: |- # description.md - version: '' # generate.sh - links: - - name: Crunchy Data - url: https://www.crunchydata.com/ - - name: Documentation - url: https://access.crunchydata.com/documentation/postgres-operator/v5/ - maintainers: - - name: Crunchy Data - email: info@crunchydata.com - - # https://olm.operatorframework.io/docs/best-practices/common/ - # Note: The minKubeVersion must correspond to the lowest supported OCP version - minKubeVersion: 1.23.0 - maturity: stable - # https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/how-to-update-operators.md#replaces--channels - replaces: '' # generate.sh - - # https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/building-your-csv.md#your-custom-resource-definitions - customresourcedefinitions: - # The "displayName" and "description" fields appear in the "Custom Resource Definitions" section - # on the details page at OperatorHub.io: https://operatorhub.io/operator/postgresql - # - # The "specDescriptors" and "statusDescriptors" fields appear in the OpenShift Console: - # https://github.com/openshift/console/tree/a8b35e4/frontend/packages/operator-lifecycle-manager/src/components/descriptors - owned: # operator-sdk generate kustomize manifests - - # https://olm.operatorframework.io/docs/advanced-tasks/operator-scoping-with-operatorgroups/ - installModes: - - { type: OwnNamespace, supported: true } - - { type: SingleNamespace, supported: true } - - { type: MultiNamespace, supported: false } - - { type: AllNamespaces, supported: true } - - install: - 
strategy: deployment - spec: - permissions: # kustomize config/operator - deployments: # kustomize config/operator diff --git a/installers/olm/bundle.relatedImages.yaml b/installers/olm/bundle.relatedImages.yaml deleted file mode 100644 index 3824b27b2e..0000000000 --- a/installers/olm/bundle.relatedImages.yaml +++ /dev/null @@ -1,25 +0,0 @@ - relatedImages: - - name: PGADMIN - image: registry.connect.redhat.com/crunchydata/crunchy-pgadmin4@sha256: - - name: PGBACKREST - image: registry.connect.redhat.com/crunchydata/crunchy-pgbackrest@sha256: - - name: PGBOUNCER - image: registry.connect.redhat.com/crunchydata/crunchy-pgbouncer@sha256: - - name: PGEXPORTER - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-exporter@sha256: - - name: PGUPGRADE - image: registry.connect.redhat.com/crunchydata/crunchy-upgrade@sha256: - - name: POSTGRES_14 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256: - - name: POSTGRES_15 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256: - - name: POSTGRES_14_GIS_3.1 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: POSTGRES_14_GIS_3.2 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: POSTGRES_14_GIS_3.3 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: POSTGRES_15_GIS_3.3 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: postgres-operator - image: registry.connect.redhat.com/crunchydata/postgres-operator@sha256: diff --git a/installers/olm/config/community/kustomization.yaml b/installers/olm/config/community/kustomization.yaml deleted file mode 100644 index a34c7b4844..0000000000 --- a/installers/olm/config/community/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- ../operator -- ../examples diff --git a/installers/olm/config/examples/kustomization.yaml b/installers/olm/config/examples/kustomization.yaml deleted file mode 100644 index 420c2644f7..0000000000 --- a/installers/olm/config/examples/kustomization.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Custom resources that are imported into the ClusterServiceVersion. -# -# The first for each GVK appears in the "Custom Resource Definitions" section on -# the details page at OperatorHub.io: https://operatorhub.io/operator/postgresql -# -# The "metadata.name" fields should be unique so they can be given a description -# that is presented by compatible UIs. -# https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/building-your-csv.md#crd-templates -# -# The "image" fields should be omitted so the defaults are used. 
-# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- postgrescluster.example.yaml -- pgadmin.example.yaml -- pgupgrade.example.yaml diff --git a/installers/olm/config/examples/pgadmin.example.yaml b/installers/olm/config/examples/pgadmin.example.yaml deleted file mode 100644 index 7ed1d3c03f..0000000000 --- a/installers/olm/config/examples/pgadmin.example.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: example-pgadmin - namespace: openshift-operators -spec: - dataVolumeClaimSpec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - serverGroups: - - name: "Crunchy Postgres for Kubernetes" - postgresClusterSelector: {} diff --git a/installers/olm/config/examples/pgupgrade.example.yaml b/installers/olm/config/examples/pgupgrade.example.yaml deleted file mode 100644 index ad4f45310a..0000000000 --- a/installers/olm/config/examples/pgupgrade.example.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: example-upgrade -spec: - postgresClusterName: example - fromPostgresVersion: 14 - toPostgresVersion: 15 diff --git a/installers/olm/config/operator/kustomization.yaml b/installers/olm/config/operator/kustomization.yaml deleted file mode 100644 index dfdce41618..0000000000 --- a/installers/olm/config/operator/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- ../../../../config/default - -patches: -- path: target-namespace.yaml diff --git a/installers/olm/config/operator/target-namespace.yaml b/installers/olm/config/operator/target-namespace.yaml deleted file mode 100644 index d7dbaadeef..0000000000 --- a/installers/olm/config/operator/target-namespace.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - env: - # https://docs.openshift.com/container-platform/4.7/operators/understanding/olm/olm-understanding-operatorgroups.html - - name: PGO_TARGET_NAMESPACE - valueFrom: { fieldRef: { fieldPath: "metadata.annotations['olm.targetNamespaces']" } } diff --git a/installers/olm/config/redhat/kustomization.yaml b/installers/olm/config/redhat/kustomization.yaml deleted file mode 100644 index 4d28b460a2..0000000000 --- a/installers/olm/config/redhat/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - ../operator - - ../examples - -patches: - - path: related-images.yaml - - path: registration.yaml diff --git a/installers/olm/config/redhat/registration.yaml b/installers/olm/config/redhat/registration.yaml deleted file mode 100644 index 8aa8a70ceb..0000000000 --- a/installers/olm/config/redhat/registration.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Red Hat Marketplace requires that bundles work offline. OSBS will fill out -# the "spec.relatedImages" field of the ClusterServiceVersion if it is blank. 
-# -# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators -# https://osbs.readthedocs.io/en/latest/users.html#pinning-pullspecs-for-related-images -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - env: - - { name: REGISTRATION_REQUIRED, value: "true" } - - { name: TOKEN_PATH, value: "/etc/cpk/cpk_token" } - - name: REGISTRATION_URL - value: "https://access.crunchydata.com/register-cpk" - - name: RSA_KEY - value: |- - -----BEGIN PUBLIC KEY----- - MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0JWaCc/F+/uV5zJQ7ryN - uzvO+oGgT7z9uXm11qtKae86H3Z3W4qX+gGPs5LrFg444yDRMLqKzPLwuS2yc4mz - QxtVbJyBZijbEDVd/knycj6MxFdBkbjxeGeWYT8nuZf4jBnWB48/O+uUnCbIYt8Q - hUtyJ+KMIXkxrOd4mOgL6dQSCEAIcxBh10ZAucDQIgCn2BrD595uPrvlrrioV/Nq - P0w0qIaKS785YU75qM4rT8tGeWVMEGst4AaRwfV7ZdVe065TP0hjd9sv8iJkr7En - /Zym1NXcKbpwoeT3X9E7cVSARPFhZU1mmtL56wq3QLeFxef9TmVva1/Io0mKn4ah - Uly5jgOpazrXliKJUoOurfMOakkHWfqSd5EfmRTh5nBcNqxtytLdiH0WlCkPSm+Z - Ue3aY91YwcRnFhImLpbQYD5aVLAryzu+IdfRJa+zcZYSK0N8n9irg6jSrQZBct7z - OagHUc0n/ZDP/BO8m0jlpJ7jH+N31Z5qFoNSaxf5H1Y/CwByXtzHJ1k2LleYsr9k - k40nMY4l+SXCe4PmW4zW9uP3ItBWKEI2jFrRJgowQvL0MwtzDhbX9qg4+L9eBFpK - jpHXr2kgLu4srIyXH6JO5UmE/62mHZh0SuqtOT1GQqWde5RjZyidYkwkAHup/AqA - P0TPL/poQ6yvI9a0i22TCpcCAwEAAQ== - -----END PUBLIC KEY----- - volumeMounts: - - mountPath: /etc/cpk - name: cpk-registration-volume - volumes: - - name: cpk-registration-volume - secret: - optional: true - secretName: cpk-registration diff --git a/installers/olm/config/redhat/related-images.yaml b/installers/olm/config/redhat/related-images.yaml deleted file mode 100644 index 7feea0c3f2..0000000000 --- a/installers/olm/config/redhat/related-images.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# Red Hat Marketplace requires that bundles work offline. OSBS will fill out -# the "spec.relatedImages" field of the ClusterServiceVersion if it is blank. 
-# -# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators -# https://osbs.readthedocs.io/en/latest/users.html#pinning-pullspecs-for-related-images -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - image: registry.connect.redhat.com/crunchydata/postgres-operator@sha256: - env: - - { - name: RELATED_IMAGE_PGADMIN, - value: "registry.connect.redhat.com/crunchydata/crunchy-pgadmin4@sha256:", - } - - { - name: RELATED_IMAGE_STANDALONE_PGADMIN, - value: "registry.connect.redhat.com/crunchydata/crunchy-pgadmin4@sha256:", - } - - { - name: RELATED_IMAGE_PGBACKREST, - value: "registry.connect.redhat.com/crunchydata/crunchy-pgbackrest@sha256:", - } - - { - name: RELATED_IMAGE_PGBOUNCER, - value: "registry.connect.redhat.com/crunchydata/crunchy-pgbouncer@sha256:", - } - - { - name: RELATED_IMAGE_PGEXPORTER, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-exporter@sha256:", - } - - { - name: RELATED_IMAGE_PGUPGRADE, - value: "registry.connect.redhat.com/crunchydata/crunchy-upgrade@sha256:", - } - - - { - name: RELATED_IMAGE_POSTGRES_14, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_15, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_16, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256:", - } - - - { - name: RELATED_IMAGE_POSTGRES_14_GIS_3.1, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_14_GIS_3.2, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_14_GIS_3.3, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_15_GIS_3.3, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_16_GIS_3.3, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_16_GIS_3.4, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } diff --git a/installers/olm/description.md b/installers/olm/description.md deleted file mode 100644 index 4528ba5aad..0000000000 --- a/installers/olm/description.md +++ /dev/null @@ -1,75 +0,0 @@ -[Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes), is the leading Kubernetes native -Postgres solution. Built on PGO, the Postgres Operator from Crunchy Data, Crunchy Postgres for Kubernetes gives you a declarative Postgres -solution that automatically manages your PostgreSQL clusters. - -Designed for your GitOps workflows, it is [easy to get started](https://access.crunchydata.com/documentation/postgres-operator/latest/quickstart) -with Crunchy Postgres for Kubernetes. Within a few moments, you can have a production grade Postgres cluster complete with high availability, disaster -recovery, and monitoring, all over secure TLS communications. Even better, Crunchy Postgres for Kubernetes lets you easily customize your Postgres -cluster to tailor it to your workload! 
- -With conveniences like cloning Postgres clusters to using rolling updates to roll out disruptive changes with minimal downtime, Crunchy Postgres -for Kubernetes is ready to support your Postgres data at every stage of your release pipeline. Built for resiliency and uptime, Crunchy Postgres -for Kubernetes will keep your Postgres cluster in a desired state so you do not need to worry about it. - -Crunchy Postgres for Kubernetes is developed with many years of production experience in automating Postgres management on Kubernetes, providing -a seamless cloud native Postgres solution to keep your data always available. - -Crunchy Postgres for Kubernetes is made available to users without an active Crunchy Data subscription in connection with Crunchy Data's -[Developer Program](https://www.crunchydata.com/developers/terms-of-use). -For more information, please contact us at [info@crunchydata.com](mailto:info@crunchydata.com). - -- **PostgreSQL Cluster Provisioning**: [Create, Scale, & Delete PostgreSQL clusters with ease][provisioning], - while fully customizing your Pods and PostgreSQL configuration! -- **High-Availability**: Safe, automated failover backed by a [distributed consensus based high-availability solution][high-availability]. - Uses [Pod Anti-Affinity][k8s-anti-affinity] to help resiliency; you can configure how aggressive this can be! - Failed primaries automatically heal, allowing for faster recovery time. You can even create regularly scheduled - backups as well and set your backup retention policy -- **Disaster Recovery**: [Backups][backups] and [restores][disaster-recovery] leverage the open source [pgBackRest][] utility and - [includes support for full, incremental, and differential backups as well as efficient delta restores][backups]. - Set how long you want your backups retained for. Works great with very large databases! -- **Monitoring**: [Track the health of your PostgreSQL clusters][monitoring] using the open source [pgMonitor][] library. -- **Clone**: [Create new clusters from your existing clusters or backups][clone] with efficient data cloning. -- **TLS**: All connections are over [TLS][tls]. You can also [bring your own TLS infrastructure][tls] if you do not want to use the provided defaults. -- **Connection Pooling**: Advanced [connection pooling][pool] support using [pgBouncer][]. -- **Affinity and Tolerations**: Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference. - Set your [pod anti-affinity][k8s-anti-affinity], node affinity, Pod tolerations and more rules to customize your deployment topology! -- **PostgreSQL Major Version Upgrades**: Perform a [PostgreSQL major version upgrade][major-version-upgrade] declaratively. -- **Database Administration**: Easily deploy [pgAdmin4][pgadmin] to administer your PostgresClusters' databases. - The automatic discovery of PostgresClusters ensures that you are able to seamlessly access any databases within your environment from the pgAdmin4 GUI. -- **Full Customizability**: Crunchy PostgreSQL for Kubernetes makes it easy to get your own PostgreSQL-as-a-Service up and running - and fully customize your deployments, including: - - Choose the resources for your Postgres cluster: [container resources and storage size][resize-cluster]. [Resize at any time][resize-cluster] with minimal disruption. - - Use your own container image repository, including support `imagePullSecrets` and private repositories - - [Customize your PostgreSQL configuration][customize-cluster] - -and much more! 
- -[backups]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery -[clone]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorials/backups-disaster-recovery/disaster-recovery -[customize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorials/day-two/customize-cluster -[disaster-recovery]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/disaster-recovery -[high-availability]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/day-two/high-availability -[major-version-upgrade]: https://access.crunchydata.com/documentation/postgres-operator/v5/guides/major-postgres-version-upgrade/ -[monitoring]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/day-two/monitoring -[pool]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/basic-setup/connection-pooling -[provisioning]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/basic-setup/create-cluster -[resize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorials/cluster-management/resize-cluster -[tls]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorials/day-two/customize-cluster#customize-tls - -[k8s-anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity -[k8s-nodes]: https://kubernetes.io/docs/concepts/architecture/nodes/ - -[pgAdmin]: https://www.pgadmin.org/ -[pgBackRest]: https://www.pgbackrest.org -[pgBouncer]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/basic-setup/connection-pooling -[pgMonitor]: https://github.com/CrunchyData/pgmonitor - -## Post-Installation - -### Tutorial - -Want to [learn more about the PostgreSQL Operator][tutorial]? Browse through the [tutorial][] to learn more about what you can do, [join the Discord server][discord] for community support, or check out the [PGO GitHub repo][ghrepo] to learn more about the open source Postgres Operator project that powers Crunchy Postgres for Kubernetes. - -[tutorial]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials -[discord]: https://discord.gg/a7vWKG8Ec9 -[ghrepo]: https://github.com/CrunchyData/postgres-operator diff --git a/installers/olm/generate.sh b/installers/olm/generate.sh deleted file mode 100755 index 8814bd4c75..0000000000 --- a/installers/olm/generate.sh +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2016 -# vim: set noexpandtab : -set -eu - -DISTRIBUTION="$1" - -cd "${BASH_SOURCE[0]%/*}" - -bundle_directory="bundles/${DISTRIBUTION}" -project_directory="projects/${DISTRIBUTION}" -go_api_directory=$(cd ../../pkg/apis && pwd) - -# The 'operators.operatorframework.io.bundle.package.v1' package name for each -# bundle (updated for the 'certified' and 'marketplace' bundles). -package_name='postgresql' - -# The project name used by operator-sdk for initial bundle generation. -project_name='postgresoperator' - -# The prefix for the 'clusterserviceversion.yaml' file. -# Per OLM guidance, the filename for the clusterserviceversion.yaml must be prefixed -# with the Operator's package name for the 'redhat' and 'marketplace' bundles. 
-# https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/troubleshooting.md#get-supported-versions -file_name='postgresoperator' -case "${DISTRIBUTION}" in - # https://redhat-connect.gitbook.io/certified-operator-guide/appendix/what-if-ive-already-published-a-community-operator - 'redhat') - file_name='crunchy-postgres-operator' - package_name='crunchy-postgres-operator' - ;; - # https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/ci-pipeline.md#bundle-structure - 'marketplace') - file_name='crunchy-postgres-operator-rhmp' - package_name='crunchy-postgres-operator-rhmp' - ;; -esac - -operator_yamls=$(kubectl kustomize "config/${DISTRIBUTION}") -operator_crds=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "CustomResourceDefinition"))') -operator_deployments=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "Deployment"))') -operator_accounts=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "ServiceAccount"))') -operator_roles=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "ClusterRole"))') - -# Recreate the Operator SDK project. -[ ! -d "${project_directory}" ] || rm -r "${project_directory}" -install -d "${project_directory}" -( - cd "${project_directory}" - operator-sdk init --fetch-deps='false' --project-name=${project_name} - rm ./*.go go.* - - # Generate CRD descriptions from Go markers. - # https://sdk.operatorframework.io/docs/building-operators/golang/references/markers/ - crd_gvks=$(yq <<< "${operator_crds}" 'map({ - group: .spec.group, kind: .spec.names.kind, version: .spec.versions[].name - })') - yq --in-place --yaml-roundtrip --argjson resources "${crd_gvks}" \ - '.multigroup = true | .resources = $resources | .' ./PROJECT - - ln -s "${go_api_directory}" . - operator-sdk generate kustomize manifests --interactive='false' -) - -# Recreate the OLM bundle. -[ ! -d "${bundle_directory}" ] || rm -r "${bundle_directory}" -install -d \ - "${bundle_directory}/manifests" \ - "${bundle_directory}/metadata" \ - "${bundle_directory}/tests/scorecard" \ - -# `echo "${operator_yamls}" | operator-sdk generate bundle` includes the ServiceAccount which cannot -# be upgraded: https://github.com/operator-framework/operator-lifecycle-manager/issues/2193 - -# Include Operator SDK scorecard tests. -# https://sdk.operatorframework.io/docs/advanced-topics/scorecard/scorecard/ -kubectl kustomize "${project_directory}/config/scorecard" \ - > "${bundle_directory}/tests/scorecard/config.yaml" - -# Render bundle annotations and strip comments. -# Per Red Hat we should not include the org.opencontainers annotations in the -# 'redhat' & 'marketplace' annotations.yaml file, so only add them for 'community'. -# - https://coreos.slack.com/team/UP1LZCC1Y -if [ ${DISTRIBUTION} == 'community' ]; then -yq --yaml-roundtrip < bundle.annotations.yaml > "${bundle_directory}/metadata/annotations.yaml" \ - --arg package "${package_name}" \ -' - .annotations["operators.operatorframework.io.bundle.package.v1"] = $package | - .annotations["org.opencontainers.image.authors"] = "info@crunchydata.com" | - .annotations["org.opencontainers.image.url"] = "https://crunchydata.com" | - .annotations["org.opencontainers.image.vendor"] = "Crunchy Data" | -.' 
-else -yq --yaml-roundtrip < bundle.annotations.yaml > "${bundle_directory}/metadata/annotations.yaml" \ - --arg package "${package_name}" \ -' - .annotations["operators.operatorframework.io.bundle.package.v1"] = $package | -.' -fi - -# Copy annotations into Dockerfile LABELs. -labels=$(yq --raw-output < "${bundle_directory}/metadata/annotations.yaml" \ - '.annotations | to_entries | map(.key +"="+ (.value | tojson)) | join(" \\\n\t")') -ANNOTATIONS="${labels}" envsubst '$ANNOTATIONS' < bundle.Dockerfile > "${bundle_directory}/Dockerfile" - -# Include CRDs as manifests. -crd_names=$(yq --raw-output <<< "${operator_crds}" 'to_entries[] | [.key, .value.metadata.name] | @tsv') -while IFS=$'\t' read -r index name; do - yq --yaml-roundtrip <<< "${operator_crds}" ".[${index}]" > "${bundle_directory}/manifests/${name}.crd.yaml" -done <<< "${crd_names}" - - -abort() { echo >&2 "$@"; exit 1; } -dump() { yq --color-output; } - -yq > /dev/null <<< "${operator_deployments}" --exit-status 'length == 1' || - abort "too many deployments!" $'\n'"$(dump <<< "${operator_deployments}")" - -yq > /dev/null <<< "${operator_accounts}" --exit-status 'length == 1' || - abort "too many service accounts!" $'\n'"$(dump <<< "${operator_accounts}")" - -yq > /dev/null <<< "${operator_roles}" --exit-status 'length == 1' || - abort "too many roles!" $'\n'"$(dump <<< "${operator_roles}")" - -# Render bundle CSV and strip comments. - -csv_stem=$(yq --raw-output '.projectName' "${project_directory}/PROJECT") - -crd_descriptions=$(yq '.spec.customresourcedefinitions.owned' \ -"${project_directory}/config/manifests/bases/${csv_stem}.clusterserviceversion.yaml") - -crd_gvks=$(yq <<< "${operator_crds}" 'map({ - group: .spec.group, kind: .spec.names.kind, version: .spec.versions[].name -} | { - apiVersion: "\(.group)/\(.version)", kind -})') -crd_examples=$(yq <<< "${operator_yamls}" --slurp --argjson gvks "${crd_gvks}" 'map(select( - IN({ apiVersion, kind }; $gvks | .[]) -))') - -yq --yaml-roundtrip < bundle.csv.yaml > "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" \ - --argjson deployment "$(yq <<< "${operator_deployments}" 'first')" \ - --argjson account "$(yq <<< "${operator_accounts}" 'first | .metadata.name')" \ - --argjson rules "$(yq <<< "${operator_roles}" 'first | .rules')" \ - --argjson crds "${crd_descriptions}" \ - --arg examples "${crd_examples}" \ - --arg version "${PGO_VERSION}" \ - --arg replaces "${REPLACES_VERSION}" \ - --arg description "$(< description.md)" \ - --arg icon "$(base64 ../seal.svg | tr -d '\n')" \ - --arg stem "${csv_stem}" \ -' - .metadata.annotations["alm-examples"] = $examples | - .metadata.annotations["containerImage"] = ($deployment.spec.template.spec.containers[0].image) | - - .metadata.name = "\($stem).v\($version)" | - .spec.version = $version | - .spec.replaces = "\($stem).v\($replaces)" | - - .spec.customresourcedefinitions.owned = $crds | - .spec.description = $description | - .spec.icon = [{ mediatype: "image/svg+xml", base64data: $icon }] | - - .spec.install.spec.permissions = [{ serviceAccountName: $account, rules: $rules }] | - .spec.install.spec.deployments = [( $deployment | { name: .metadata.name, spec } )] | -.' 
- -case "${DISTRIBUTION}" in - 'redhat') - # https://redhat-connect.gitbook.io/certified-operator-guide/appendix/what-if-ive-already-published-a-community-operator - yq --in-place --yaml-roundtrip \ - ' - .metadata.annotations.certified = "true" | - .metadata.annotations["containerImage"] = "registry.connect.redhat.com/crunchydata/postgres-operator@sha256:" | - .metadata.annotations["containerImage"] = "registry.connect.redhat.com/crunchydata/postgres-operator@sha256:" | - .' \ - "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - - # Finally, add related images. NOTE: SHA values will need to be updated - # -https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/troubleshooting.md#digest-pinning - cat bundle.relatedImages.yaml >> "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - ;; - 'marketplace') - # Annotations needed when targeting Red Hat Marketplace - # https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/ci-pipeline.md#bundle-structure - yq --in-place --yaml-roundtrip \ - --arg package_url "https://marketplace.redhat.com/en-us/operators/${file_name}" \ - ' - .metadata.annotations["containerImage"] = "registry.connect.redhat.com/crunchydata/postgres-operator@sha256:" | - .metadata.annotations["marketplace.openshift.io/remote-workflow"] = - "\($package_url)/pricing?utm_source=openshift_console" | - .metadata.annotations["marketplace.openshift.io/support-workflow"] = - "\($package_url)/support?utm_source=openshift_console" | - .' \ - "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - - # Finally, add related images. NOTE: SHA values will need to be updated - # -https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/troubleshooting.md#digest-pinning - cat bundle.relatedImages.yaml >> "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - ;; -esac - -if > /dev/null command -v tree; then tree -C "${bundle_directory}"; fi diff --git a/installers/olm/install.sh b/installers/olm/install.sh deleted file mode 100755 index 2c4f6ce190..0000000000 --- a/installers/olm/install.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -if command -v oc >/dev/null; then - kubectl() { oc "$@"; } - kubectl version -else - kubectl version --short -fi - -catalog_source() ( - source_namespace="$1" - source_name="$2" - index_image="$3" - - kc() { kubectl --namespace="$source_namespace" "$@"; } - kc get namespace "$source_namespace" --output=jsonpath='{""}' 2>/dev/null || - kc create namespace "$source_namespace" - - # See https://godoc.org/github.com/operator-framework/api/pkg/operators/v1alpha1#CatalogSource - source_json=$(jq --null-input \ - --arg name "${source_name}" \ - --arg image "${index_image}" \ - '{ - apiVersion: "operators.coreos.com/v1alpha1", kind: "CatalogSource", - metadata: { name: $name }, - spec: { - displayName: "Test Registry", - sourceType: "grpc", image: $image - } - }') - kc create --filename=- <<< "$source_json" - - # Wait for Pod to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get pod --selector="olm.catalogSource=${source_name}" --output=jsonpath='{.items}' )" ] && - break || sleep 1s - done - if ! 
kc wait --for='condition=ready' --timeout='30s' pod --selector="olm.catalogSource=${source_name}"; then - kc logs --previous --tail='-1' --selector="olm.catalogSource=${source_name}" - fi -) - -operator_group() ( - group_namespace="$1" - group_name="$2" - target_namespaces=("${@:3}") - - kc() { kubectl --namespace="$group_namespace" "$@"; } - kc get namespace "$group_namespace" --output=jsonpath='{""}' 2>/dev/null || - kc create namespace "$group_namespace" - - group_json="$( jq <<< '{}' --arg name "$group_name" '{ - apiVersion: "operators.coreos.com/v1", kind: "OperatorGroup", - metadata: { "name": $name }, - spec: { targetNamespaces: [] } - }' )" - - for ns in "${target_namespaces[@]}"; do - group_json="$( jq <<< "$group_json" --arg namespace "$ns" '.spec.targetNamespaces += [ $namespace ]' )" - done - - kc create --filename=- <<< "$group_json" -) - -operator() ( - bundle_directory="$1" index_image="$2" - operator_namespace="$3" - target_namespaces=("${@:4}") - - package_name=$(yq \ - --raw-output '.annotations["operators.operatorframework.io.bundle.package.v1"]' \ - "${bundle_directory}"/*/annotations.yaml) - channel_name=$(yq \ - --raw-output '.annotations["operators.operatorframework.io.bundle.channels.v1"]' \ - "${bundle_directory}"/*/annotations.yaml) - csv_name=$(yq --raw-output '.metadata.name' \ - "${bundle_directory}"/*/*.clusterserviceversion.yaml) - - kc() { kubectl --namespace="$operator_namespace" "$@"; } - - catalog_source "$operator_namespace" olm-catalog-source "${index_image}" - operator_group "$operator_namespace" olm-operator-group "${target_namespaces[@]}" - - # Create a Subscription to install the operator. - # See https://godoc.org/github.com/operator-framework/api/pkg/operators/v1alpha1#Subscription - subscription_json=$(jq --null-input \ - --arg channel "$channel_name" \ - --arg namespace "$operator_namespace" \ - --arg package "$package_name" \ - --arg version "$csv_name" \ - '{ - apiVersion: "operators.coreos.com/v1alpha1", kind: "Subscription", - metadata: { name: $package }, - spec: { - name: $package, - sourceNamespace: $namespace, - source: "olm-catalog-source", - startingCSV: $version, - channel: $channel - } - }') - kc create --filename=- <<< "$subscription_json" - - # Wait for the InstallPlan to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get installplan --output=jsonpath="{.items}" )" ] && - break || sleep 1s - done - if ! kc wait --for='condition=installed' --timeout='30s' installplan --all; then - subscription_uid="$( kc get subscription "$package_name" --output=jsonpath='{.metadata.uid}' )" - installplan_json="$( kc get installplan --output=json )" - - jq <<< "$installplan_json" --arg uid "$subscription_uid" \ - '.items[] | select(.metadata.ownerReferences[] | select(.uid == $uid)).status.conditions' - exit 1 - fi - - # Wait for Deployment to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get deploy --selector="olm.owner=$csv_name" --output=jsonpath='{.items}' )" ] && - break || sleep 1s - done - if ! 
kc wait --for='condition=available' --timeout='30s' deploy --selector="olm.owner=$csv_name"; then - kc describe pod --selector="olm.owner=$csv_name" - - crashed_containers="$( kc get pod --selector="olm.owner=$csv_name" --output=json )" - crashed_containers="$( jq <<< "$crashed_containers" --raw-output \ - '.items[] | { - pod: .metadata.name, - container: .status.containerStatuses[] | select(.restartCount > 0).name - } | [.pod, .container] | @tsv' )" - - test -z "$crashed_containers" || while IFS=$'\t' read -r pod container; do - echo; echo "$pod/$container" restarted: - kc logs --container="$container" --previous --tail='-1' "pod/$pod" - done <<< "$crashed_containers" - - exit 1 - fi -) - -"$@" diff --git a/installers/olm/validate-directory.sh b/installers/olm/validate-directory.sh deleted file mode 100755 index 726f64946e..0000000000 --- a/installers/olm/validate-directory.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -if command -v oc > /dev/null; then - kubectl() { oc "$@"; } - kubectl version -else - kubectl version --short -fi - -push_trap_exit() { - local -a array - eval "array=($(trap -p EXIT))" - # shellcheck disable=SC2064 - trap "$1;${array[2]-}" EXIT -} - -validate_bundle_directory() { - local directory="$1" - local namespace - - namespace=$(kubectl create --filename=- --output='go-template={{.metadata.name}}' <<< '{ - "apiVersion": "v1", "kind": "Namespace", - "metadata": { - "generateName": "olm-test-", - "labels": { "olm-test": "bundle-directory" } - } - }') - echo 'namespace "'"${namespace}"'" created' - push_trap_exit "kubectl delete namespace '${namespace}'" - - # https://olm.operatorframework.io/docs/best-practices/common/ - # https://sdk.operatorframework.io/docs/advanced-topics/scorecard/scorecard/ - operator-sdk scorecard --namespace="${namespace}" "${directory}" -} - -validate_bundle_directory "$@" diff --git a/installers/olm/validate-image.sh b/installers/olm/validate-image.sh deleted file mode 100755 index 9d9adef6cf..0000000000 --- a/installers/olm/validate-image.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -push_trap_exit() { - local -a array - eval "array=($(trap -p EXIT))" - # shellcheck disable=SC2064 - trap "$1;${array[2]-}" EXIT -} - -# Store anything in a single temporary directory that gets cleaned up. -TMPDIR=$(mktemp -d) -push_trap_exit "rm -rf '${TMPDIR}'" -export TMPDIR - -validate_bundle_image() { - local container="$1" directory="$2" - directory=$(cd "${directory}" && pwd) - - cat > "${TMPDIR}/registry.config" <<-SSL - [req] - distinguished_name = req_distinguished_name - x509_extensions = v3_ext - prompt = no - [req_distinguished_name] - commonName = localhost - [v3_ext] - subjectAltName = @alt_names - [alt_names] - DNS.1 = localhost - SSL - - openssl ecparam -name prime256v1 -genkey -out "${TMPDIR}/registry.key" - openssl req -new -x509 -days 1 \ - -config "${TMPDIR}/registry.config" \ - -key "${TMPDIR}/registry.key" \ - -out "${TMPDIR}/registry.crt" - - # Start a local image registry. 
- local image port registry - registry=$(${container} run --detach --publish-all \ - --env='REGISTRY_HTTP_TLS_CERTIFICATE=/mnt/registry.crt' \ - --env='REGISTRY_HTTP_TLS_KEY=/mnt/registry.key' \ - --volume="${TMPDIR}:/mnt" \ - docker.io/library/registry:latest) - # https://github.com/containers/podman/issues/8524 - push_trap_exit "echo -n 'Removing '; ${container} rm '${registry}'" - push_trap_exit "echo -n 'Stopping '; ${container} stop '${registry}'" - - port=$(${container} inspect "${registry}" \ - --format='{{ (index .NetworkSettings.Ports "5000/tcp" 0).HostPort }}') - image="localhost:${port}/postgres-operator-bundle:latest" - - cat > "${TMPDIR}/registries.conf" <<-TOML - [[registry]] - location = "localhost:${port}" - insecure = true - TOML - - # Build the bundle image and push it to the local registry. - ${container} run --rm \ - --device='/dev/fuse:rw' --network='host' --security-opt='seccomp=unconfined' \ - --volume="${TMPDIR}/registries.conf:/etc/containers/registries.conf.d/localhost.conf:ro" \ - --volume="${directory}:/mnt:delegated" \ - --workdir='/mnt' \ - quay.io/buildah/stable:latest \ - buildah build-using-dockerfile \ - --format='docker' --layers --tag="docker://${image}" - - local -a opm - local opm_version - opm_version=$(opm version) - opm_version=$(sed -n 's#.*OpmVersion:"\([^"]*\)".*#\1# p' <<< "${opm_version}") - # shellcheck disable=SC2206 - opm=(${container} run --rm - --network='host' - --volume="${TMPDIR}/registry.crt:/usr/local/share/ca-certificates/registry.crt:ro" - --volume="${TMPDIR}:/mnt:delegated" - --workdir='/mnt' - quay.io/operator-framework/upstream-opm-builder:"${opm_version}" - sh -ceu 'update-ca-certificates && exec "$@"' - opm) - - # Validate the bundle image in the local registry. - # https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/#validating-your-bundle - "${opm[@]}" alpha bundle validate --image-builder='none' \ - --optional-validators='operatorhub,bundle-objects' \ - --tag="${image}" -} - -validate_bundle_image "$@" diff --git a/installers/seal.svg b/installers/seal.svg deleted file mode 100644 index 28e875f48f..0000000000 --- a/installers/seal.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/internal/bridge/client.go b/internal/bridge/client.go index 29bd009814..d5ad8470f7 100644 --- a/internal/bridge/client.go +++ b/internal/bridge/client.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge diff --git a/internal/bridge/client_test.go b/internal/bridge/client_test.go index 5b1e6f6665..28728c701c 100644 --- a/internal/bridge/client_test.go +++ b/internal/bridge/client_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge diff --git a/internal/bridge/crunchybridgecluster/apply.go b/internal/bridge/crunchybridgecluster/apply.go index 5276678fa5..d77d719d6a 100644 --- a/internal/bridge/crunchybridgecluster/apply.go +++ b/internal/bridge/crunchybridgecluster/apply.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go index d2f9e72723..03d67442be 100644 --- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster @@ -31,9 +20,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/crunchydata/postgres-operator/internal/bridge" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" pgoRuntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -67,13 +56,12 @@ func (r *CrunchyBridgeClusterReconciler) SetupWithManager( // Wake periodically to check Bridge API for all CrunchyBridgeClusters. 
// Potentially replace with different requeue times, remove the Watch function // Smarter: retry after a certain time for each cluster: https://gist.github.com/cbandy/a5a604e3026630c5b08cfbcdfffd2a13 - Watches( - pgoRuntime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}), - r.Watch(), + WatchesRawSource( + pgoRuntime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, r.Watch()), ). // Watch secrets and filter for secrets mentioned by CrunchyBridgeClusters Watches( - &source.Kind{Type: &corev1.Secret{}}, + &corev1.Secret{}, r.watchForRelatedSecret(), ). Complete(r) @@ -154,11 +142,6 @@ func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, err } else if result != nil { if log := log.V(1); log.Enabled() { - if result.RequeueAfter > 0 { - // RequeueAfter implies Requeue, but set both to make the next - // log message more clear. - result.Requeue = true - } log.Info("deleting", "result", fmt.Sprintf("%+v", *result)) } return *result, err @@ -198,7 +181,7 @@ func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl } // if we've gotten here then no cluster exists with that name and we're missing the ID, ergo, create cluster - return r.handleCreateCluster(ctx, key, team, crunchybridgecluster) + return r.handleCreateCluster(ctx, key, team, crunchybridgecluster), nil } // If we reach this point, our CrunchyBridgeCluster object has an ID, so we want @@ -240,7 +223,7 @@ func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl // TODO(crunchybridgecluster): Do we want the operator to interrupt // upgrades created through the GUI/API? if len(crunchybridgecluster.Status.OngoingUpgrade) != 0 { - return ctrl.Result{RequeueAfter: 3 * time.Minute}, nil + return runtime.RequeueWithoutBackoff(3 * time.Minute), nil } // Check if there's an upgrade difference for the three upgradeable fields that hit the upgrade endpoint @@ -249,14 +232,14 @@ func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl if (crunchybridgecluster.Spec.Storage != *crunchybridgecluster.Status.Storage) || crunchybridgecluster.Spec.Plan != crunchybridgecluster.Status.Plan || crunchybridgecluster.Spec.PostgresVersion != crunchybridgecluster.Status.MajorVersion { - return r.handleUpgrade(ctx, key, crunchybridgecluster) + return r.handleUpgrade(ctx, key, crunchybridgecluster), nil } // Are there diffs between the cluster response from the Bridge API and the spec? // HA diffs are sent to /clusters/{cluster_id}/actions/[enable|disable]-ha // so have to know (a) to send and (b) which to send to if crunchybridgecluster.Spec.IsHA != *crunchybridgecluster.Status.IsHA { - return r.handleUpgradeHA(ctx, key, crunchybridgecluster) + return r.handleUpgradeHA(ctx, key, crunchybridgecluster), nil } // Check if there's a difference in is_protected, name, maintenance_window_start, etc. @@ -264,13 +247,13 @@ func (r *CrunchyBridgeClusterReconciler) Reconcile(ctx context.Context, req ctrl // updates to these fields that hit the PATCH `clusters/` endpoint if crunchybridgecluster.Spec.IsProtected != *crunchybridgecluster.Status.IsProtected || crunchybridgecluster.Spec.ClusterName != crunchybridgecluster.Status.ClusterName { - return r.handleUpdate(ctx, key, crunchybridgecluster) + return r.handleUpdate(ctx, key, crunchybridgecluster), nil } log.Info("Reconciled") // TODO(crunchybridgecluster): do we always want to requeue? Does the Watch mean we // don't need this, or do we want both? 
- return ctrl.Result{RequeueAfter: 3 * time.Minute}, nil + return runtime.RequeueWithoutBackoff(3 * time.Minute), nil } // reconcileBridgeConnectionSecret looks for the Bridge connection secret specified by the cluster, @@ -370,7 +353,7 @@ func (r *CrunchyBridgeClusterReconciler) handleDuplicateClusterName(ctx context. // handleCreateCluster handles creating new Crunchy Bridge Clusters func (r *CrunchyBridgeClusterReconciler) handleCreateCluster(ctx context.Context, apiKey, teamId string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, -) (ctrl.Result, error) { +) ctrl.Result { log := ctrl.LoggerFrom(ctx) createClusterRequestPayload := &bridge.PostClustersRequestPayload{ @@ -400,7 +383,7 @@ func (r *CrunchyBridgeClusterReconciler) handleCreateCluster(ctx context.Context // TODO(crunchybridgecluster): If the payload is wrong, we don't want to requeue, so pass nil error // If the transmission hit a transient problem, we do want to requeue - return ctrl.Result{}, nil + return ctrl.Result{} } crunchybridgecluster.Status.ID = cluster.ID @@ -420,7 +403,7 @@ func (r *CrunchyBridgeClusterReconciler) handleCreateCluster(ctx context.Context Message: "The condition of the upgrade(s) is unknown.", }) - return ctrl.Result{RequeueAfter: 3 * time.Minute}, nil + return runtime.RequeueWithoutBackoff(3 * time.Minute) } // handleGetCluster handles getting the cluster details from Bridge and @@ -539,7 +522,7 @@ func (r *CrunchyBridgeClusterReconciler) handleGetClusterUpgrade(ctx context.Con func (r *CrunchyBridgeClusterReconciler) handleUpgrade(ctx context.Context, apiKey string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, -) (ctrl.Result, error) { +) ctrl.Result { log := ctrl.LoggerFrom(ctx) log.Info("Handling upgrade request") @@ -565,7 +548,7 @@ func (r *CrunchyBridgeClusterReconciler) handleUpgrade(ctx context.Context, "Error performing an upgrade: %s", err), }) log.Error(err, "Error while attempting cluster upgrade") - return ctrl.Result{}, nil + return ctrl.Result{} } clusterUpgrade.AddDataToClusterStatus(crunchybridgecluster) @@ -581,7 +564,7 @@ func (r *CrunchyBridgeClusterReconciler) handleUpgrade(ctx context.Context, }) } - return ctrl.Result{RequeueAfter: 3 * time.Minute}, nil + return runtime.RequeueWithoutBackoff(3 * time.Minute) } // handleUpgradeHA handles upgrades that hit the @@ -589,7 +572,7 @@ func (r *CrunchyBridgeClusterReconciler) handleUpgrade(ctx context.Context, func (r *CrunchyBridgeClusterReconciler) handleUpgradeHA(ctx context.Context, apiKey string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, -) (ctrl.Result, error) { +) ctrl.Result { log := ctrl.LoggerFrom(ctx) log.Info("Handling HA change request") @@ -613,7 +596,7 @@ func (r *CrunchyBridgeClusterReconciler) handleUpgradeHA(ctx context.Context, "Error performing an HA upgrade: %s", err), }) log.Error(err, "Error while attempting cluster HA change") - return ctrl.Result{}, nil + return ctrl.Result{} } clusterUpgrade.AddDataToClusterStatus(crunchybridgecluster) if len(clusterUpgrade.Operations) != 0 { @@ -628,14 +611,14 @@ func (r *CrunchyBridgeClusterReconciler) handleUpgradeHA(ctx context.Context, }) } - return ctrl.Result{RequeueAfter: 3 * time.Minute}, nil + return runtime.RequeueWithoutBackoff(3 * time.Minute) } // handleUpdate handles upgrades that hit the "PATCH /clusters/" endpoint func (r *CrunchyBridgeClusterReconciler) handleUpdate(ctx context.Context, apiKey string, crunchybridgecluster *v1beta1.CrunchyBridgeCluster, -) (ctrl.Result, error) { +) ctrl.Result { log := ctrl.LoggerFrom(ctx) 
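// A minimal sketch of a requeue helper like the runtime.RequeueWithoutBackoff
// referenced in the hunks above. The body shown here is an assumption for
// illustration only, not necessarily the repository's actual implementation;
// it is consistent with how the call sites use the returned value.
package runtime

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// RequeueWithoutBackoff asks controller-runtime to run the reconciler again
// after the given delay. Because the request is rescheduled via RequeueAfter
// rather than by returning an error, the workqueue's exponential backoff does
// not apply. A positive delay is assumed.
func RequeueWithoutBackoff(delay time.Duration) ctrl.Result {
	return ctrl.Result{RequeueAfter: delay}
}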
log.Info("Handling update request") @@ -660,7 +643,7 @@ func (r *CrunchyBridgeClusterReconciler) handleUpdate(ctx context.Context, "Error performing an upgrade: %s", err), }) log.Error(err, "Error while attempting cluster update") - return ctrl.Result{}, nil + return ctrl.Result{} } clusterUpdate.AddDataToClusterStatus(crunchybridgecluster) meta.SetStatusCondition(&crunchybridgecluster.Status.Conditions, metav1.Condition{ @@ -673,7 +656,7 @@ func (r *CrunchyBridgeClusterReconciler) handleUpdate(ctx context.Context, clusterUpdate.ClusterName, *clusterUpdate.IsProtected), }) - return ctrl.Result{RequeueAfter: 3 * time.Minute}, nil + return runtime.RequeueWithoutBackoff(3 * time.Minute) } // GetSecretKeys gets the secret and returns the expected API key and team id diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go index 1cbd555e6a..92d6b58d0e 100644 --- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster @@ -196,9 +185,8 @@ func TestHandleCreateCluster(t *testing.T) { cluster := testCluster() cluster.Namespace = ns - controllerResult, err := reconciler.handleCreateCluster(ctx, testApiKey, testTeamId, cluster) - assert.NilError(t, err) - assert.Equal(t, controllerResult, ctrl.Result{RequeueAfter: 3 * time.Minute}) + controllerResult := reconciler.handleCreateCluster(ctx, testApiKey, testTeamId, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) assert.Equal(t, cluster.Status.ID, "0") readyCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionReady) @@ -222,8 +210,7 @@ func TestHandleCreateCluster(t *testing.T) { cluster := testCluster() cluster.Namespace = ns - controllerResult, err := reconciler.handleCreateCluster(ctx, "bad_api_key", testTeamId, cluster) - assert.NilError(t, err) + controllerResult := reconciler.handleCreateCluster(ctx, "bad_api_key", testTeamId, cluster) assert.Equal(t, controllerResult, ctrl.Result{}) assert.Equal(t, cluster.Status.ID, "") @@ -485,9 +472,8 @@ func TestHandleUpgrade(t *testing.T) { cluster.Status.ID = "1234" cluster.Spec.Plan = "standard-16" // originally "standard-8" - controllerResult, err := reconciler.handleUpgrade(ctx, testApiKey, cluster) - assert.NilError(t, err) - assert.Equal(t, controllerResult, ctrl.Result{RequeueAfter: 3 * time.Minute}) + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) @@ -508,9 +494,8 @@ func TestHandleUpgrade(t *testing.T) { cluster.Status.ID = "1234" cluster.Spec.PostgresVersion = 16 // originally "15" - controllerResult, err := reconciler.handleUpgrade(ctx, testApiKey, cluster) - assert.NilError(t, err) - assert.Equal(t, controllerResult, ctrl.Result{RequeueAfter: 3 * time.Minute}) + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) @@ -531,9 +516,8 @@ func TestHandleUpgrade(t *testing.T) { cluster.Status.ID = "1234" cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi" - controllerResult, err := reconciler.handleUpgrade(ctx, testApiKey, cluster) - assert.NilError(t, err) - assert.Equal(t, controllerResult, ctrl.Result{RequeueAfter: 3 * time.Minute}) + controllerResult := reconciler.handleUpgrade(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) @@ -554,8 +538,7 @@ func TestHandleUpgrade(t *testing.T) { cluster.Status.ID = "1234" cluster.Spec.Storage = resource.MustParse("15Gi") // originally "10Gi" - controllerResult, err := reconciler.handleUpgrade(ctx, "bad_api_key", cluster) - assert.NilError(t, err) + controllerResult := reconciler.handleUpgrade(ctx, "bad_api_key", cluster) assert.Equal(t, controllerResult, ctrl.Result{}) 
upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { @@ -597,9 +580,8 @@ func TestHandleUpgradeHA(t *testing.T) { cluster.Status.ID = "1234" cluster.Spec.IsHA = true // originally "false" - controllerResult, err := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) - assert.NilError(t, err) - assert.Equal(t, controllerResult, ctrl.Result{RequeueAfter: 3 * time.Minute}) + controllerResult := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) @@ -619,9 +601,8 @@ func TestHandleUpgradeHA(t *testing.T) { cluster.Namespace = ns cluster.Status.ID = "2345" - controllerResult, err := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) - assert.NilError(t, err) - assert.Equal(t, controllerResult, ctrl.Result{RequeueAfter: 3 * time.Minute}) + controllerResult := reconciler.handleUpgradeHA(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) @@ -641,8 +622,7 @@ func TestHandleUpgradeHA(t *testing.T) { cluster.Namespace = ns cluster.Status.ID = "1234" - controllerResult, err := reconciler.handleUpgradeHA(ctx, "bad_api_key", cluster) - assert.NilError(t, err) + controllerResult := reconciler.handleUpgradeHA(ctx, "bad_api_key", cluster) assert.Equal(t, controllerResult, ctrl.Result{}) upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { @@ -680,9 +660,8 @@ func TestHandleUpdate(t *testing.T) { cluster.Status.ID = "1234" cluster.Spec.ClusterName = "new-cluster-name" // originally "hippo-cluster" - controllerResult, err := reconciler.handleUpdate(ctx, testApiKey, cluster) - assert.NilError(t, err) - assert.Equal(t, controllerResult, ctrl.Result{RequeueAfter: 3 * time.Minute}) + controllerResult := reconciler.handleUpdate(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) @@ -699,9 +678,8 @@ func TestHandleUpdate(t *testing.T) { cluster.Status.ID = "1234" cluster.Spec.IsProtected = true // originally "false" - controllerResult, err := reconciler.handleUpdate(ctx, testApiKey, cluster) - assert.NilError(t, err) - assert.Equal(t, controllerResult, ctrl.Result{RequeueAfter: 3 * time.Minute}) + controllerResult := reconciler.handleUpdate(ctx, testApiKey, cluster) + assert.Equal(t, controllerResult.RequeueAfter, 3*time.Minute) upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { assert.Equal(t, upgradingCondition.Status, metav1.ConditionTrue) @@ -718,8 +696,7 @@ func TestHandleUpdate(t *testing.T) { cluster.Status.ID = "1234" cluster.Spec.IsProtected = true // originally "false" - controllerResult, err := reconciler.handleUpdate(ctx, "bad_api_key", 
cluster) - assert.NilError(t, err) + controllerResult := reconciler.handleUpdate(ctx, "bad_api_key", cluster) assert.Equal(t, controllerResult, ctrl.Result{}) upgradingCondition := meta.FindStatusCondition(cluster.Status.Conditions, v1beta1.ConditionUpgrading) if assert.Check(t, upgradingCondition != nil) { diff --git a/internal/bridge/crunchybridgecluster/delete.go b/internal/bridge/crunchybridgecluster/delete.go index bdaa040b16..8dcada31cf 100644 --- a/internal/bridge/crunchybridgecluster/delete.go +++ b/internal/bridge/crunchybridgecluster/delete.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster @@ -22,6 +11,8 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -58,7 +49,7 @@ func (r *CrunchyBridgeClusterReconciler) handleDelete( } if !deletedAlready { - return &ctrl.Result{RequeueAfter: 1 * time.Second}, err + return initialize.Pointer(runtime.RequeueWithoutBackoff(time.Second)), err } // Remove finalizer if deleted already diff --git a/internal/bridge/crunchybridgecluster/delete_test.go b/internal/bridge/crunchybridgecluster/delete_test.go index db6fc1a5f3..28e6feb1f8 100644 --- a/internal/bridge/crunchybridgecluster/delete_test.go +++ b/internal/bridge/crunchybridgecluster/delete_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster @@ -85,7 +74,7 @@ func TestHandleDeleteCluster(t *testing.T) { cluster.Status.ID = "1234" controllerResult, err = reconciler.handleDelete(ctx, cluster, "9012") assert.NilError(t, err) - assert.Equal(t, *controllerResult, ctrl.Result{RequeueAfter: 1 * time.Second}) + assert.Equal(t, controllerResult.RequeueAfter, 1*time.Second) assert.Equal(t, len(testBridgeClient.Clusters), 1) assert.Equal(t, testBridgeClient.Clusters[0].ClusterName, "bridge-cluster-2") diff --git a/internal/bridge/crunchybridgecluster/helpers_test.go b/internal/bridge/crunchybridgecluster/helpers_test.go index a290934321..f40ad3d054 100644 --- a/internal/bridge/crunchybridgecluster/helpers_test.go +++ b/internal/bridge/crunchybridgecluster/helpers_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster diff --git a/internal/bridge/crunchybridgecluster/mock_bridge_api.go b/internal/bridge/crunchybridgecluster/mock_bridge_api.go index 42116e3afb..5c6b243714 100644 --- a/internal/bridge/crunchybridgecluster/mock_bridge_api.go +++ b/internal/bridge/crunchybridgecluster/mock_bridge_api.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster diff --git a/internal/bridge/crunchybridgecluster/postgres.go b/internal/bridge/crunchybridgecluster/postgres.go index 9fd36dafaa..024631de67 100644 --- a/internal/bridge/crunchybridgecluster/postgres.go +++ b/internal/bridge/crunchybridgecluster/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster @@ -27,7 +16,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/bridge" - "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -45,11 +33,11 @@ func (r *CrunchyBridgeClusterReconciler) generatePostgresRoleSecret( Name: secretName, }} intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) - initialize.StringMap(&intent.StringData) - - intent.StringData["name"] = clusterRole.Name - intent.StringData["password"] = clusterRole.Password - intent.StringData["uri"] = clusterRole.URI + intent.StringData = map[string]string{ + "name": clusterRole.Name, + "password": clusterRole.Password, + "uri": clusterRole.URI, + } intent.Annotations = cluster.Spec.Metadata.GetAnnotationsOrNil() intent.Labels = naming.Merge( diff --git a/internal/bridge/crunchybridgecluster/postgres_test.go b/internal/bridge/crunchybridgecluster/postgres_test.go index a2a854be9f..66add7b789 100644 --- a/internal/bridge/crunchybridgecluster/postgres_test.go +++ b/internal/bridge/crunchybridgecluster/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster diff --git a/internal/bridge/crunchybridgecluster/watches.go b/internal/bridge/crunchybridgecluster/watches.go index eefc30c2ae..79687b3476 100644 --- a/internal/bridge/crunchybridgecluster/watches.go +++ b/internal/bridge/crunchybridgecluster/watches.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster @@ -31,8 +20,7 @@ import ( // watchForRelatedSecret handles create/update/delete events for secrets, // passing the Secret ObjectKey to findCrunchyBridgeClustersForSecret func (r *CrunchyBridgeClusterReconciler) watchForRelatedSecret() handler.EventHandler { - handle := func(secret client.Object, q workqueue.RateLimitingInterface) { - ctx := context.Background() + handle := func(ctx context.Context, secret client.Object, q workqueue.RateLimitingInterface) { key := client.ObjectKeyFromObject(secret) for _, cluster := range r.findCrunchyBridgeClustersForSecret(ctx, key) { @@ -43,11 +31,11 @@ func (r *CrunchyBridgeClusterReconciler) watchForRelatedSecret() handler.EventHa } return handler.Funcs{ - CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(e.ObjectNew, q) + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) }, // If the secret is deleted, we want to reconcile // in order to emit an event/status about this problem. @@ -55,8 +43,8 @@ func (r *CrunchyBridgeClusterReconciler) watchForRelatedSecret() handler.EventHa // when we reconcile the cluster and can't find the secret. // That way, users will get two alerts: one when the secret is deleted // and another when the cluster is being reconciled. - DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, } } @@ -91,8 +79,7 @@ func (r *CrunchyBridgeClusterReconciler) findCrunchyBridgeClustersForSecret( // Watch enqueues all existing CrunchyBridgeClusters for reconciles. func (r *CrunchyBridgeClusterReconciler) Watch() handler.EventHandler { - return handler.EnqueueRequestsFromMapFunc(func(client.Object) []reconcile.Request { - ctx := context.Background() + return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request { log := ctrl.LoggerFrom(ctx) crunchyBridgeClusterList := &v1beta1.CrunchyBridgeClusterList{} diff --git a/internal/bridge/crunchybridgecluster/watches_test.go b/internal/bridge/crunchybridgecluster/watches_test.go index a95bd58bc5..48dba2ba14 100644 --- a/internal/bridge/crunchybridgecluster/watches_test.go +++ b/internal/bridge/crunchybridgecluster/watches_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package crunchybridgecluster diff --git a/internal/bridge/installation.go b/internal/bridge/installation.go index e79a5e0dcf..c76a073348 100644 --- a/internal/bridge/installation.go +++ b/internal/bridge/installation.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge @@ -61,7 +50,7 @@ type Installation struct { type InstallationReconciler struct { Owner client.FieldOwner Reader interface { - Get(context.Context, client.ObjectKey, client.Object) error + Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error } Writer interface { Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error @@ -102,11 +91,14 @@ func ManagedInstallationReconciler(m manager.Manager, newClient func() *Client) )). // // Wake periodically even when that Secret does not exist. - Watches( - runtime.NewTickerImmediate(time.Hour, event.GenericEvent{}), - handler.EnqueueRequestsFromMapFunc(func(client.Object) []reconcile.Request { - return []reconcile.Request{{NamespacedName: reconciler.SecretRef}} - }), + WatchesRawSource( + runtime.NewTickerImmediate(time.Hour, event.GenericEvent{}, + handler.EnqueueRequestsFromMapFunc( + func(context.Context, client.Object) []reconcile.Request { + return []reconcile.Request{{NamespacedName: reconciler.SecretRef}} + }, + ), + ), ). // Complete(reconciler) @@ -128,13 +120,15 @@ func (r *InstallationReconciler) Reconcile( result.RequeueAfter, err = r.reconcile(ctx, secret) } - // TODO: Check for corev1.NamespaceTerminatingCause after - // k8s.io/apimachinery@v0.25; see https://issue.k8s.io/108528. + // Nothing can be written to a deleted namespace. + if err != nil && apierrors.HasStatusCause(err, corev1.NamespaceTerminatingCause) { + return runtime.ErrorWithoutBackoff(err) + } // Write conflicts are returned as errors; log and retry with backoff. if err != nil && apierrors.IsConflict(err) { logging.FromContext(ctx).Info("Requeue", "reason", err) - err, result.Requeue, result.RequeueAfter = nil, true, 0 + return runtime.RequeueWithBackoff(), nil } return result, err diff --git a/internal/bridge/installation_test.go b/internal/bridge/installation_test.go index e062de8d18..96223a2233 100644 --- a/internal/bridge/installation_test.go +++ b/internal/bridge/installation_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge diff --git a/internal/bridge/naming.go b/internal/bridge/naming.go index 7a0124ae7a..cabe8e9cf6 100644 --- a/internal/bridge/naming.go +++ b/internal/bridge/naming.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge diff --git a/internal/bridge/quantity.go b/internal/bridge/quantity.go index 1c1915b716..a948c6b4cf 100644 --- a/internal/bridge/quantity.go +++ b/internal/bridge/quantity.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge diff --git a/internal/bridge/quantity_test.go b/internal/bridge/quantity_test.go index e9d2cce100..7cfebb4a86 100644 --- a/internal/bridge/quantity_test.go +++ b/internal/bridge/quantity_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package bridge diff --git a/internal/config/config.go b/internal/config/config.go index 3fe8a81068..e3f9ced215 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package config diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 66fc91e752..7b8ca2f863 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package config @@ -25,30 +14,6 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -func saveEnv(t testing.TB, key string) { - t.Helper() - previous, ok := os.LookupEnv(key) - t.Cleanup(func() { - if ok { - os.Setenv(key, previous) - } else { - os.Unsetenv(key) - } - }) -} - -func setEnv(t testing.TB, key, value string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Setenv(key, value)) -} - -func unsetEnv(t testing.TB, key string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Unsetenv(key)) -} - func TestFetchKeyCommand(t *testing.T) { spec1 := v1beta1.PostgresClusterSpec{} @@ -117,13 +82,14 @@ func TestFetchKeyCommand(t *testing.T) { func TestPGAdminContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGADMIN") + t.Setenv("RELATED_IMAGE_PGADMIN", "") + os.Unsetenv("RELATED_IMAGE_PGADMIN") assert.Equal(t, PGAdminContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGADMIN", "") + t.Setenv("RELATED_IMAGE_PGADMIN", "") assert.Equal(t, PGAdminContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGADMIN", "env-var-pgadmin") + t.Setenv("RELATED_IMAGE_PGADMIN", "env-var-pgadmin") assert.Equal(t, PGAdminContainerImage(cluster), "env-var-pgadmin") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -135,13 +101,14 @@ func TestPGAdminContainerImage(t *testing.T) { func TestPGBackRestContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGBACKREST") + t.Setenv("RELATED_IMAGE_PGBACKREST", "") + os.Unsetenv("RELATED_IMAGE_PGBACKREST") assert.Equal(t, PGBackRestContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBACKREST", "") + t.Setenv("RELATED_IMAGE_PGBACKREST", "") assert.Equal(t, PGBackRestContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBACKREST", "env-var-pgbackrest") + t.Setenv("RELATED_IMAGE_PGBACKREST", "env-var-pgbackrest") assert.Equal(t, PGBackRestContainerImage(cluster), "env-var-pgbackrest") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -153,13 +120,14 @@ func TestPGBackRestContainerImage(t 
*testing.T) { func TestPGBouncerContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGBOUNCER") + t.Setenv("RELATED_IMAGE_PGBOUNCER", "") + os.Unsetenv("RELATED_IMAGE_PGBOUNCER") assert.Equal(t, PGBouncerContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBOUNCER", "") + t.Setenv("RELATED_IMAGE_PGBOUNCER", "") assert.Equal(t, PGBouncerContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGBOUNCER", "env-var-pgbouncer") + t.Setenv("RELATED_IMAGE_PGBOUNCER", "env-var-pgbouncer") assert.Equal(t, PGBouncerContainerImage(cluster), "env-var-pgbouncer") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -171,13 +139,14 @@ func TestPGBouncerContainerImage(t *testing.T) { func TestPGExporterContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} - unsetEnv(t, "RELATED_IMAGE_PGEXPORTER") + t.Setenv("RELATED_IMAGE_PGEXPORTER", "") + os.Unsetenv("RELATED_IMAGE_PGEXPORTER") assert.Equal(t, PGExporterContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGEXPORTER", "") + t.Setenv("RELATED_IMAGE_PGEXPORTER", "") assert.Equal(t, PGExporterContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_PGEXPORTER", "env-var-pgexporter") + t.Setenv("RELATED_IMAGE_PGEXPORTER", "env-var-pgexporter") assert.Equal(t, PGExporterContainerImage(cluster), "env-var-pgexporter") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -189,13 +158,14 @@ func TestPGExporterContainerImage(t *testing.T) { func TestStandalonePGAdminContainerImage(t *testing.T) { pgadmin := &v1beta1.PGAdmin{} - unsetEnv(t, "RELATED_IMAGE_STANDALONE_PGADMIN") + t.Setenv("RELATED_IMAGE_STANDALONE_PGADMIN", "") + os.Unsetenv("RELATED_IMAGE_STANDALONE_PGADMIN") assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "") - setEnv(t, "RELATED_IMAGE_STANDALONE_PGADMIN", "") + t.Setenv("RELATED_IMAGE_STANDALONE_PGADMIN", "") assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "") - setEnv(t, "RELATED_IMAGE_STANDALONE_PGADMIN", "env-var-pgadmin") + t.Setenv("RELATED_IMAGE_STANDALONE_PGADMIN", "env-var-pgadmin") assert.Equal(t, StandalonePGAdminContainerImage(pgadmin), "env-var-pgadmin") assert.NilError(t, yaml.Unmarshal([]byte(`{ @@ -208,13 +178,14 @@ func TestPostgresContainerImage(t *testing.T) { cluster := &v1beta1.PostgresCluster{} cluster.Spec.PostgresVersion = 12 - unsetEnv(t, "RELATED_IMAGE_POSTGRES_12") + t.Setenv("RELATED_IMAGE_POSTGRES_12", "") + os.Unsetenv("RELATED_IMAGE_POSTGRES_12") assert.Equal(t, PostgresContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_POSTGRES_12", "") + t.Setenv("RELATED_IMAGE_POSTGRES_12", "") assert.Equal(t, PostgresContainerImage(cluster), "") - setEnv(t, "RELATED_IMAGE_POSTGRES_12", "env-var-postgres") + t.Setenv("RELATED_IMAGE_POSTGRES_12", "env-var-postgres") assert.Equal(t, PostgresContainerImage(cluster), "env-var-postgres") cluster.Spec.Image = "spec-image" @@ -222,7 +193,7 @@ func TestPostgresContainerImage(t *testing.T) { cluster.Spec.Image = "" cluster.Spec.PostGISVersion = "3.0" - setEnv(t, "RELATED_IMAGE_POSTGRES_12_GIS_3.0", "env-var-postgis") + t.Setenv("RELATED_IMAGE_POSTGRES_12_GIS_3.0", "env-var-postgis") assert.Equal(t, PostgresContainerImage(cluster), "env-var-postgis") cluster.Spec.Image = "spec-image" @@ -233,7 +204,9 @@ func TestVerifyImageValues(t *testing.T) { cluster := &v1beta1.PostgresCluster{} verifyImageCheck := func(t *testing.T, envVar, errString string, cluster *v1beta1.PostgresCluster) { - unsetEnv(t, envVar) + + t.Setenv(envVar, "") + os.Unsetenv(envVar) err := VerifyImageValues(cluster) assert.ErrorContains(t, 
err, errString) } diff --git a/internal/controller/pgupgrade/apply.go b/internal/controller/pgupgrade/apply.go index 5e3719cb19..71cf65cd4f 100644 --- a/internal/controller/pgupgrade/apply.go +++ b/internal/controller/pgupgrade/apply.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index 045df3a929..a1722dfc12 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade @@ -192,8 +182,8 @@ func (r *PGUpgradeReconciler) generateUpgradeJob( // The following will set these fields to null if not set in the spec job.Spec.Template.Spec.Affinity = upgrade.Spec.Affinity - job.Spec.Template.Spec.PriorityClassName = initialize.FromPointer( - upgrade.Spec.PriorityClassName) + job.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(upgrade.Spec.PriorityClassName) job.Spec.Template.Spec.Tolerations = upgrade.Spec.Tolerations r.setControllerReference(upgrade, job) @@ -302,8 +292,8 @@ func (r *PGUpgradeReconciler) generateRemoveDataJob( // The following will set these fields to null if not set in the spec job.Spec.Template.Spec.Affinity = upgrade.Spec.Affinity - job.Spec.Template.Spec.PriorityClassName = initialize.FromPointer( - upgrade.Spec.PriorityClassName) + job.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(upgrade.Spec.PriorityClassName) job.Spec.Template.Spec.Tolerations = upgrade.Spec.Tolerations r.setControllerReference(upgrade, job) diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go index 9bdda64d02..8dfc4731a2 100644 --- a/internal/controller/pgupgrade/jobs_test.go +++ b/internal/controller/pgupgrade/jobs_test.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade @@ -21,25 +11,16 @@ import ( "testing" "gotest.tools/v3/assert" - "gotest.tools/v3/assert/cmp" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// marshalMatches converts actual to YAML and compares that to expected. -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - b, err := yaml.Marshal(actual) - if err != nil { - return func() cmp.Result { return cmp.ResultFromError(err) } - } - return cmp.DeepEqual(string(b), strings.Trim(expected, "\t\n")+"\n") -} - func TestGenerateUpgradeJob(t *testing.T) { ctx := context.Background() reconciler := &PGUpgradeReconciler{} @@ -77,7 +58,7 @@ func TestGenerateUpgradeJob(t *testing.T) { } job := reconciler.generateUpgradeJob(ctx, upgrade, startup, "") - assert.Assert(t, marshalMatches(job, ` + assert.Assert(t, cmp.MarshalMatches(job, ` apiVersion: batch/v1 kind: Job metadata: @@ -208,7 +189,7 @@ func TestGenerateRemoveDataJob(t *testing.T) { } job := reconciler.generateRemoveDataJob(ctx, upgrade, sts) - assert.Assert(t, marshalMatches(job, ` + assert.Assert(t, cmp.MarshalMatches(job, ` apiVersion: batch/v1 kind: Job metadata: @@ -271,42 +252,17 @@ status: {} `)) } -// saveEnv preserves environment variables so that any modifications needed for -// the tests can be undone once completed. 
-func saveEnv(t testing.TB, key string) { - t.Helper() - previous, ok := os.LookupEnv(key) - t.Cleanup(func() { - if ok { - os.Setenv(key, previous) - } else { - os.Unsetenv(key) - } - }) -} - -func setEnv(t testing.TB, key, value string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Setenv(key, value)) -} - -func unsetEnv(t testing.TB, key string) { - t.Helper() - saveEnv(t, key) - assert.NilError(t, os.Unsetenv(key)) -} - func TestPGUpgradeContainerImage(t *testing.T) { upgrade := &v1beta1.PGUpgrade{} - unsetEnv(t, "RELATED_IMAGE_PGUPGRADE") + t.Setenv("RELATED_IMAGE_PGUPGRADE", "") + os.Unsetenv("RELATED_IMAGE_PGUPGRADE") assert.Equal(t, pgUpgradeContainerImage(upgrade), "") - setEnv(t, "RELATED_IMAGE_PGUPGRADE", "") + t.Setenv("RELATED_IMAGE_PGUPGRADE", "") assert.Equal(t, pgUpgradeContainerImage(upgrade), "") - setEnv(t, "RELATED_IMAGE_PGUPGRADE", "env-var-pgbackrest") + t.Setenv("RELATED_IMAGE_PGUPGRADE", "env-var-pgbackrest") assert.Equal(t, pgUpgradeContainerImage(upgrade), "env-var-pgbackrest") assert.NilError(t, yaml.Unmarshal( @@ -318,7 +274,8 @@ func TestVerifyUpgradeImageValue(t *testing.T) { upgrade := &v1beta1.PGUpgrade{} t.Run("crunchy-postgres", func(t *testing.T) { - unsetEnv(t, "RELATED_IMAGE_PGUPGRADE") + t.Setenv("RELATED_IMAGE_PGUPGRADE", "") + os.Unsetenv("RELATED_IMAGE_PGUPGRADE") err := verifyUpgradeImageValue(upgrade) assert.ErrorContains(t, err, "crunchy-upgrade") }) diff --git a/internal/controller/pgupgrade/labels.go b/internal/controller/pgupgrade/labels.go index e7cf11bc0e..187fe6bf6f 100644 --- a/internal/controller/pgupgrade/labels.go +++ b/internal/controller/pgupgrade/labels.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index 3592d4e93c..d6d145b793 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package pgupgrade @@ -29,9 +19,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -59,7 +49,7 @@ func (r *PGUpgradeReconciler) SetupWithManager(mgr ctrl.Manager) error { For(&v1beta1.PGUpgrade{}). Owns(&batchv1.Job{}). Watches( - &source.Kind{Type: v1beta1.NewPostgresCluster()}, + v1beta1.NewPostgresCluster(), r.watchPostgresClusters(), ). Complete(r) @@ -92,8 +82,7 @@ func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster( // watchPostgresClusters returns a [handler.EventHandler] for PostgresClusters. func (r *PGUpgradeReconciler) watchPostgresClusters() handler.Funcs { - handle := func(cluster client.Object, q workqueue.RateLimitingInterface) { - ctx := context.Background() + handle := func(ctx context.Context, cluster client.Object, q workqueue.RateLimitingInterface) { key := client.ObjectKeyFromObject(cluster) for _, upgrade := range r.findUpgradesForPostgresCluster(ctx, key) { @@ -104,14 +93,14 @@ func (r *PGUpgradeReconciler) watchPostgresClusters() handler.Funcs { } return handler.Funcs{ - CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(e.ObjectNew, q) + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) }, - DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, } } @@ -495,7 +484,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } // Requeue to verify that Patroni endpoints are deleted - return ctrl.Result{Requeue: true}, err // FIXME + return runtime.RequeueWithBackoff(), err // FIXME } // TODO: write upgradeJob back to world? No, we will wake and see it when it @@ -503,9 +492,7 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // TODO: consider what it means to "re-use" the same PGUpgrade for more than // one postgres version. Should the job name include the version number? - log.Info("Reconciled", "requeue", err != nil || - result.Requeue || - result.RequeueAfter > 0) + log.Info("Reconciled", "requeue", !result.IsZero() || err != nil) return } diff --git a/internal/controller/pgupgrade/registration.go b/internal/controller/pgupgrade/registration.go index 895f1a44a1..05d0d80cbd 100644 --- a/internal/controller/pgupgrade/registration.go +++ b/internal/controller/pgupgrade/registration.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/pgupgrade/registration_test.go b/internal/controller/pgupgrade/registration_test.go index dccd9e893d..dc3a4144bc 100644 --- a/internal/controller/pgupgrade/registration_test.go +++ b/internal/controller/pgupgrade/registration_test.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/pgupgrade/utils.go b/internal/controller/pgupgrade/utils.go index e5b62d1d46..292107e440 100644 --- a/internal/controller/pgupgrade/utils.go +++ b/internal/controller/pgupgrade/utils.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/pgupgrade/world.go b/internal/controller/pgupgrade/world.go index a3e15e84c7..18d056fe25 100644 --- a/internal/controller/pgupgrade/world.go +++ b/internal/controller/pgupgrade/world.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/pgupgrade/world_test.go b/internal/controller/pgupgrade/world_test.go index d65da88df6..4aa24f714d 100644 --- a/internal/controller/pgupgrade/world_test.go +++ b/internal/controller/pgupgrade/world_test.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package pgupgrade diff --git a/internal/controller/postgrescluster/apply.go b/internal/controller/postgrescluster/apply.go index dbdf20d785..2dae1f7d80 100644 --- a/internal/controller/postgrescluster/apply.go +++ b/internal/controller/postgrescluster/apply.go @@ -1,33 +1,15 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" - "encoding/json" - "fmt" "reflect" - jsonpatch "github.com/evanphx/json-patch/v5" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/kubeapi" @@ -57,11 +39,6 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { // does not match the intent, send a json-patch to get really specific. switch actual := object.(type) { case *corev1.Service: - // Changing Service.Spec.Type requires a special apply-patch sometimes. - if err != nil { - err = r.handleServiceError(ctx, object.(*corev1.Service), data, err) - } - applyServiceSpec(patch, actual.Spec, intent.(*corev1.Service).Spec, "spec") } @@ -72,53 +49,6 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { return err } -// handleServiceError inspects err for expected Kubernetes API responses to -// writing a Service. It returns err when it cannot resolve the issue, otherwise -// it returns nil. 
-func (r *Reconciler) handleServiceError( - ctx context.Context, service *corev1.Service, apply []byte, err error, -) error { - var status metav1.Status - if api := apierrors.APIStatus(nil); errors.As(err, &api) { - status = api.Status() - } - - // Service.Spec.Ports.NodePort must be cleared for ClusterIP prior to - // Kubernetes 1.20. When all the errors are about disallowed "nodePort", - // run a json-patch on the apply-patch to set them all to null. - // - https://issue.k8s.io/33766 - if service.Spec.Type == corev1.ServiceTypeClusterIP { - add := json.RawMessage(`"add"`) - null := json.RawMessage(`null`) - patch := make(jsonpatch.Patch, 0, len(service.Spec.Ports)) - - if apierrors.IsInvalid(err) && status.Details != nil { - for i, cause := range status.Details.Causes { - path := json.RawMessage(fmt.Sprintf(`"/spec/ports/%d/nodePort"`, i)) - - if cause.Type == metav1.CauseType(field.ErrorTypeForbidden) && - cause.Field == fmt.Sprintf("spec.ports[%d].nodePort", i) { - patch = append(patch, - jsonpatch.Operation{"op": &add, "value": &null, "path": &path}) - } - } - } - - // Amend the apply-patch when all the errors can be fixed. - if len(patch) == len(service.Spec.Ports) { - apply, err = patch.Apply(apply) - } - - // Send the apply-patch with force=true. - if err == nil { - patch := client.RawPatch(client.Apply.Type(), apply) - err = r.patch(ctx, service, patch, client.ForceOwnership) - } - } - - return err -} - // applyServiceSpec is called by Reconciler.apply to work around issues // with server-side apply. func applyServiceSpec( diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/postgrescluster/apply_test.go index 007aebbd9d..c163e8a5ab 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/postgrescluster/apply_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -310,55 +299,4 @@ func TestServerSideApply(t *testing.T) { }) } }) - - t.Run("ServiceType", func(t *testing.T) { - constructor := func(name string) *corev1.Service { - var service corev1.Service - service.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) - service.Namespace, service.Name = ns.Name, name - service.Spec.Ports = []corev1.ServicePort{ - {Name: "one", Port: 9999, Protocol: corev1.ProtocolTCP}, - {Name: "two", Port: 1234, Protocol: corev1.ProtocolTCP}, - } - return &service - } - - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} - - // Start as NodePort. - intent := constructor("node-port") - intent.Spec.Type = corev1.ServiceTypeNodePort - - // Create the Service. - before := intent.DeepCopy() - assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) - - // Change to ClusterIP. 
- intent.Spec.Type = corev1.ServiceTypeClusterIP - - // client.Apply cannot change it in old versions of Kubernetes. - after := intent.DeepCopy() - err := cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner) - - switch { - case serverVersion.LessThan(version.MustParseGeneric("1.20")): - - assert.ErrorContains(t, err, "nodePort: Forbidden", - "expected https://issue.k8s.io/33766") - - default: - assert.NilError(t, err) - assert.Equal(t, after.Spec.Type, intent.Spec.Type) - assert.Equal(t, after.Spec.ClusterIP, before.Spec.ClusterIP, - "expected to keep the same ClusterIP") - } - - // Our apply method changes it. - again := intent.DeepCopy() - assert.NilError(t, reconciler.apply(ctx, again)) - assert.Equal(t, again.Spec.Type, intent.Spec.Type) - assert.Equal(t, again.Spec.ClusterIP, before.Spec.ClusterIP, - "expected to keep the same ClusterIP") - }) } diff --git a/internal/controller/postgrescluster/cluster.go b/internal/controller/postgrescluster/cluster.go index 8d32679db3..3ba6eab0e8 100644 --- a/internal/controller/postgrescluster/cluster.go +++ b/internal/controller/postgrescluster/cluster.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -26,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/patroni" "github.com/crunchydata/postgres-operator/internal/pki" @@ -248,6 +238,8 @@ func (r *Reconciler) generateClusterReplicaService( } servicePort.NodePort = *spec.NodePort } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy } service.Spec.Ports = []corev1.ServicePort{servicePort} @@ -290,7 +282,9 @@ func (r *Reconciler) reconcileClusterReplicaService( func (r *Reconciler) reconcileDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, observed *observedInstances, clusterVolumes []corev1.PersistentVolumeClaim, - rootCA *pki.RootCertificateAuthority) (bool, error) { + rootCA *pki.RootCertificateAuthority, + backupsSpecFound bool, +) (bool, error) { // a hash func to hash the pgBackRest restore options hashFunc := func(jobConfigs []string) (string, error) { @@ -413,7 +407,8 @@ func (r *Reconciler) reconcileDataSource(ctx context.Context, switch { case dataSource != nil: if err := r.reconcilePostgresClusterDataSource(ctx, cluster, dataSource, - configHash, clusterVolumes, rootCA); err != nil { + configHash, clusterVolumes, rootCA, + backupsSpecFound); err != nil { return true, err } case cloudDataSource != nil: diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index 2465621b4e..be9e371a56 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -36,6 +25,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -611,11 +601,11 @@ func TestGenerateClusterPrimaryService(t *testing.T) { assert.ErrorContains(t, err, "not implemented") alwaysExpect := func(t testing.TB, service *corev1.Service, endpoints *corev1.Endpoints) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg5 @@ -630,7 +620,7 @@ ownerReferences: name: pg5 uid: "" `)) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres port: 2600 protocol: TCP @@ -641,7 +631,7 @@ ownerReferences: assert.Assert(t, service.Spec.Selector == nil, "got %v", service.Spec.Selector) - assert.Assert(t, marshalMatches(endpoints, ` + assert.Assert(t, cmp.MarshalMatches(endpoints, ` apiVersion: v1 kind: Endpoints metadata: @@ -730,11 +720,11 @@ func TestGenerateClusterReplicaServiceIntent(t *testing.T) { assert.NilError(t, err) alwaysExpect := func(t testing.TB, service *corev1.Service) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg2 @@ -752,7 +742,7 @@ ownerReferences: } alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec, ` ports: - name: postgres port: 9876 @@ -788,7 +778,7 @@ type: ClusterIP assert.NilError(t, err) alwaysExpect(t, service) test.Expect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres port: 9876 protocol: TCP @@ -808,19 +798,19 @@ type: ClusterIP assert.NilError(t, err) // Annotations present in the metadata. - assert.Assert(t, marshalMatches(service.ObjectMeta.Annotations, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Annotations, ` some: note `)) // Labels present in the metadata. - assert.Assert(t, marshalMatches(service.ObjectMeta.Labels, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Labels, ` happy: label postgres-operator.crunchydata.com/cluster: pg2 postgres-operator.crunchydata.com/role: replica `)) // Labels not in the selector. 
- assert.Assert(t, marshalMatches(service.Spec.Selector, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Selector, ` postgres-operator.crunchydata.com/cluster: pg2 postgres-operator.crunchydata.com/role: replica `)) diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index 30e2918961..d459d30a10 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -1,28 +1,16 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "context" + "errors" "fmt" "io" - "os" - "strconv" + "time" - "github.com/pkg/errors" "go.opentelemetry.io/otel/trace" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -30,20 +18,21 @@ import ( policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/discovery" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/pgaudit" "github.com/crunchydata/postgres-operator/internal/pgbackrest" @@ -62,11 +51,12 @@ const ( // Reconciler holds resources for the PostgresCluster reconciler type Reconciler struct { - Client client.Client - IsOpenShift bool - Owner client.FieldOwner - PodExec func( - namespace, pod, container string, + Client client.Client + DiscoveryClient *discovery.DiscoveryClient + IsOpenShift bool + Owner client.FieldOwner + PodExec func( + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error Recorder record.EventRecorder @@ -86,15 +76,6 @@ func (r *Reconciler) Reconcile( log := logging.FromContext(ctx) defer span.End() - // create the result that will be updated following a call to each reconciler - result := reconcile.Result{} - updateResult := func(next reconcile.Result, err error) error { - if err == nil { - result = updateReconcileResult(result, next) - } - return err - } - // get the postgrescluster from the cache cluster := 
&v1beta1.PostgresCluster{} if err := r.Client.Get(ctx, request.NamespacedName, cluster); err != nil { @@ -105,7 +86,7 @@ func (r *Reconciler) Reconcile( log.Error(err, "unable to fetch PostgresCluster") span.RecordError(err) } - return result, err + return runtime.ErrorWithBackoff(err) } // Set any defaults that may not have been stored in the API. No DeepCopy @@ -131,15 +112,10 @@ func (r *Reconciler) Reconcile( if result, err := r.handleDelete(ctx, cluster); err != nil { span.RecordError(err) log.Error(err, "deleting") - return reconcile.Result{}, err + return runtime.ErrorWithBackoff(err) } else if result != nil { if log := log.V(1); log.Enabled() { - if result.RequeueAfter > 0 { - // RequeueAfter implies Requeue, but set both to make the next - // log message more clear. - result.Requeue = true - } log.Info("deleting", "result", fmt.Sprintf("%+v", *result)) } return *result, nil @@ -156,9 +132,8 @@ func (r *Reconciler) Reconcile( err.Error()) // specifically allow reconciliation if the cluster is shutdown to // facilitate upgrades, otherwise return - if cluster.Spec.Shutdown == nil || - (cluster.Spec.Shutdown != nil && !*cluster.Spec.Shutdown) { - return result, err + if !initialize.FromPointer(cluster.Spec.Shutdown) { + return runtime.ErrorWithBackoff(err) } } @@ -171,44 +146,43 @@ func (r *Reconciler) Reconcile( // this configuration and provide an event path := field.NewPath("spec", "standby") err := field.Invalid(path, cluster.Name, "Standby requires a host or repoName to be enabled") - r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidStandbyConfiguration", - err.Error()) - return result, err + r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidStandbyConfiguration", err.Error()) + return runtime.ErrorWithBackoff(err) } var ( - clusterConfigMap *corev1.ConfigMap - clusterReplicationSecret *corev1.Secret - clusterPodService *corev1.Service - clusterVolumes []corev1.PersistentVolumeClaim - instanceServiceAccount *corev1.ServiceAccount - instances *observedInstances - patroniLeaderService *corev1.Service - primaryCertificate *corev1.SecretProjection - primaryService *corev1.Service - replicaService *corev1.Service - rootCA *pki.RootCertificateAuthority - monitoringSecret *corev1.Secret - exporterQueriesConfig *corev1.ConfigMap - exporterWebConfig *corev1.ConfigMap - err error + clusterConfigMap *corev1.ConfigMap + clusterReplicationSecret *corev1.Secret + clusterPodService *corev1.Service + clusterVolumes []corev1.PersistentVolumeClaim + instanceServiceAccount *corev1.ServiceAccount + instances *observedInstances + patroniLeaderService *corev1.Service + primaryCertificate *corev1.SecretProjection + primaryService *corev1.Service + replicaService *corev1.Service + rootCA *pki.RootCertificateAuthority + monitoringSecret *corev1.Secret + exporterQueriesConfig *corev1.ConfigMap + exporterWebConfig *corev1.ConfigMap + err error + backupsSpecFound bool + backupsReconciliationAllowed bool + dedicatedSnapshotPVC *corev1.PersistentVolumeClaim ) - // Define a function for updating PostgresCluster status. Returns any error that - // occurs while attempting to patch the status, while otherwise simply returning the - // Result and error variables that are populated while reconciling the PostgresCluster. 
- patchClusterStatus := func() (reconcile.Result, error) { + patchClusterStatus := func() error { if !equality.Semantic.DeepEqual(before.Status, cluster.Status) { // NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track // managed fields on the status subresource: https://issue.k8s.io/88901 - if err := errors.WithStack(r.Client.Status().Patch( - ctx, cluster, client.MergeFrom(before), r.Owner)); err != nil { + if err := r.Client.Status().Patch( + ctx, cluster, client.MergeFrom(before), r.Owner); err != nil { log.Error(err, "patching cluster status") - return result, err + return err } log.V(1).Info("patched cluster status") } - return result, err + return nil } if r.Registration != nil && r.Registration.Required(r.Recorder, cluster, &cluster.Status.Conditions) { @@ -227,18 +201,39 @@ func (r *Reconciler) Reconcile( ObservedGeneration: cluster.GetGeneration(), }) - return patchClusterStatus() + return runtime.ErrorWithBackoff(patchClusterStatus()) } else { meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing) } + if err == nil { + backupsSpecFound, backupsReconciliationAllowed, err = r.BackupsEnabled(ctx, cluster) + + // If we cannot reconcile because the backup reconciliation is paused, set a condition and exit + if !backupsReconciliationAllowed { + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: v1beta1.PostgresClusterProgressing, + Status: metav1.ConditionFalse, + Reason: "Paused", + Message: "Reconciliation is paused: please fill in spec.backups " + + "or add the postgres-operator.crunchydata.com/authorizeBackupRemoval " + + "annotation to authorize backup removal.", + + ObservedGeneration: cluster.GetGeneration(), + }) + return runtime.ErrorWithBackoff(patchClusterStatus()) + } else { + meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing) + } + } + pgHBAs := postgres.NewHBAs() pgmonitor.PostgreSQLHBAs(cluster, &pgHBAs) pgbouncer.PostgreSQL(cluster, &pgHBAs) pgParameters := postgres.NewParameters() pgaudit.PostgreSQLParameters(&pgParameters) - pgbackrest.PostgreSQL(cluster, &pgParameters) + pgbackrest.PostgreSQL(cluster, &pgParameters, backupsSpecFound) pgmonitor.PostgreSQLParameters(cluster, &pgParameters) // Set huge_pages = try if a hugepages resource limit > 0, otherwise set "off" @@ -255,10 +250,9 @@ func (r *Reconciler) Reconcile( // return a bool indicating that the controller should return early while any // required Jobs are running, after which it will indicate that an early // return is no longer needed, and reconciliation can proceed normally. 
- var returnEarly bool - returnEarly, err = r.reconcileDirMoveJobs(ctx, cluster) + returnEarly, err := r.reconcileDirMoveJobs(ctx, cluster) if err != nil || returnEarly { - return patchClusterStatus() + return runtime.ErrorWithBackoff(errors.Join(err, patchClusterStatus())) } } if err == nil { @@ -270,8 +264,14 @@ func (r *Reconciler) Reconcile( if err == nil { instances, err = r.observeInstances(ctx, cluster) } + + result := reconcile.Result{} + if err == nil { - err = updateResult(r.reconcilePatroniStatus(ctx, cluster, instances)) + var requeue time.Duration + if requeue, err = r.reconcilePatroniStatus(ctx, cluster, instances); err == nil && requeue > 0 { + result.RequeueAfter = requeue + } } if err == nil { err = r.reconcilePatroniSwitchover(ctx, cluster, instances) @@ -300,10 +300,9 @@ func (r *Reconciler) Reconcile( // the controller should return early while data initialization is in progress, after // which it will indicate that an early return is no longer needed, and reconciliation // can proceed normally. - var returnEarly bool - returnEarly, err = r.reconcileDataSource(ctx, cluster, instances, clusterVolumes, rootCA) + returnEarly, err := r.reconcileDataSource(ctx, cluster, instances, clusterVolumes, rootCA, backupsSpecFound) if err != nil || returnEarly { - return patchClusterStatus() + return runtime.ErrorWithBackoff(errors.Join(err, patchClusterStatus())) } } if err == nil { @@ -343,7 +342,9 @@ func (r *Reconciler) Reconcile( err = r.reconcileInstanceSets( ctx, cluster, clusterConfigMap, clusterReplicationSecret, rootCA, clusterPodService, instanceServiceAccount, instances, patroniLeaderService, - primaryCertificate, clusterVolumes, exporterQueriesConfig, exporterWebConfig) + primaryCertificate, clusterVolumes, exporterQueriesConfig, exporterWebConfig, + backupsSpecFound, + ) } if err == nil { @@ -354,7 +355,20 @@ func (r *Reconciler) Reconcile( } if err == nil { - err = updateResult(r.reconcilePGBackRest(ctx, cluster, instances, rootCA)) + var next reconcile.Result + if next, err = r.reconcilePGBackRest(ctx, cluster, + instances, rootCA, backupsSpecFound); err == nil && !next.IsZero() { + result.Requeue = result.Requeue || next.Requeue + if next.RequeueAfter > 0 { + result.RequeueAfter = next.RequeueAfter + } + } + } + if err == nil { + dedicatedSnapshotPVC, err = r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + } + if err == nil { + err = r.reconcileVolumeSnapshots(ctx, cluster, dedicatedSnapshotPVC) } if err == nil { err = r.reconcilePGBouncer(ctx, cluster, instances, primaryCertificate, rootCA) @@ -380,7 +394,7 @@ func (r *Reconciler) Reconcile( log.V(1).Info("reconciled cluster") - return patchClusterStatus() + return result, errors.Join(err, patchClusterStatus()) } // deleteControlled safely deletes object when it is controlled by cluster. 
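Editor's note on the Reconcile rework in the hunks above: the old updateResult accumulator is dropped and the main reconcile error is now combined with the status-patch error via errors.Join, so neither failure masks the other. A minimal sketch of that pattern follows; doWork and patchStatus are hypothetical stand-ins for the operator's real logic and are not part of this patch.

package main

import (
	"errors"
	"fmt"
)

// doWork stands in for the main reconciliation work; patchStatus stands in for
// the status patch that is attempted even when reconciliation failed.
func doWork() error      { return errors.New("reconcile failed") }
func patchStatus() error { return errors.New("patch failed") }

func reconcile() error {
	err := doWork()
	// errors.Join (Go 1.20+) returns nil only when every argument is nil,
	// so both failures are preserved in the returned error.
	return errors.Join(err, patchStatus())
}

func main() {
	// Prints both messages, one per line:
	//   reconcile failed
	//   patch failed
	fmt.Println(reconcile())
}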
@@ -458,24 +472,16 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { } } - var opts controller.Options - - // TODO(cbandy): Move this to main with controller-runtime v0.9+ - // - https://github.com/kubernetes-sigs/controller-runtime/commit/82fc2564cf - if s := os.Getenv("PGO_WORKERS"); s != "" { - if i, err := strconv.Atoi(s); err == nil && i > 0 { - opts.MaxConcurrentReconciles = i - } else { - mgr.GetLogger().Error(err, "PGO_WORKERS must be a positive number") + if r.DiscoveryClient == nil { + var err error + r.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(mgr.GetConfig()) + if err != nil { + return err } } - if opts.MaxConcurrentReconciles == 0 { - opts.MaxConcurrentReconciles = 2 - } return builder.ControllerManagedBy(mgr). For(&v1beta1.PostgresCluster{}). - WithOptions(opts). Owns(&corev1.ConfigMap{}). Owns(&corev1.Endpoints{}). Owns(&corev1.PersistentVolumeClaim{}). @@ -489,8 +495,33 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { Owns(&rbacv1.RoleBinding{}). Owns(&batchv1.CronJob{}). Owns(&policyv1.PodDisruptionBudget{}). - Watches(&source.Kind{Type: &corev1.Pod{}}, r.watchPods()). - Watches(&source.Kind{Type: &appsv1.StatefulSet{}}, + Watches(&corev1.Pod{}, r.watchPods()). + Watches(&appsv1.StatefulSet{}, r.controllerRefHandlerFuncs()). // watch all StatefulSets Complete(r) } + +// GroupVersionKindExists checks to see whether a given Kind for a given +// GroupVersion exists in the Kubernetes API Server. +func (r *Reconciler) GroupVersionKindExists(groupVersion, kind string) (*bool, error) { + if r.DiscoveryClient == nil { + return initialize.Bool(false), nil + } + + resourceList, err := r.DiscoveryClient.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + if apierrors.IsNotFound(err) { + return initialize.Bool(false), nil + } + + return nil, err + } + + for _, resource := range resourceList.APIResources { + if resource.Kind == kind { + return initialize.Bool(true), nil + } + } + + return initialize.Bool(false), nil +} diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go index 072605fb29..8c4a34189f 100644 --- a/internal/controller/postgrescluster/controller_ref_manager.go +++ b/internal/controller/postgrescluster/controller_ref_manager.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -192,23 +181,21 @@ func (r *Reconciler) releaseObject(ctx context.Context, // StatefulSets within the cluster as needed to manage controller ownership refs. 
func (r *Reconciler) controllerRefHandlerFuncs() *handler.Funcs { - // var err error - ctx := context.Background() - log := logging.FromContext(ctx) + log := logging.FromContext(context.Background()) errMsg := "managing StatefulSet controller refs" return &handler.Funcs{ - CreateFunc: func(updateEvent event.CreateEvent, workQueue workqueue.RateLimitingInterface) { + CreateFunc: func(ctx context.Context, updateEvent event.CreateEvent, workQueue workqueue.RateLimitingInterface) { if err := r.manageControllerRefs(ctx, updateEvent.Object); err != nil { log.Error(err, errMsg) } }, - UpdateFunc: func(updateEvent event.UpdateEvent, workQueue workqueue.RateLimitingInterface) { + UpdateFunc: func(ctx context.Context, updateEvent event.UpdateEvent, workQueue workqueue.RateLimitingInterface) { if err := r.manageControllerRefs(ctx, updateEvent.ObjectNew); err != nil { log.Error(err, errMsg) } }, - DeleteFunc: func(updateEvent event.DeleteEvent, workQueue workqueue.RateLimitingInterface) { + DeleteFunc: func(ctx context.Context, updateEvent event.DeleteEvent, workQueue workqueue.RateLimitingInterface) { if err := r.manageControllerRefs(ctx, updateEvent.Object); err != nil { log.Error(err, errMsg) } diff --git a/internal/controller/postgrescluster/controller_ref_manager_test.go b/internal/controller/postgrescluster/controller_ref_manager_test.go index c03745fa12..8543fe390d 100644 --- a/internal/controller/postgrescluster/controller_ref_manager_test.go +++ b/internal/controller/postgrescluster/controller_ref_manager_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index 95c1513475..e6fdc5cb86 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -43,7 +32,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -143,9 +131,6 @@ var _ = Describe("PostgresCluster Reconciler", func() { test.Namespace.Name = "postgres-operator-test-" + rand.String(6) Expect(suite.Client.Create(ctx, test.Namespace)).To(Succeed()) - // Initialize the feature gate - Expect(util.AddAndSetFeatureGates("")).To(Succeed()) - test.Recorder = record.NewFakeRecorder(100) test.Recorder.IncludeObject = true diff --git a/internal/controller/postgrescluster/delete.go b/internal/controller/postgrescluster/delete.go index fdc85f73b1..63fc007f40 100644 --- a/internal/controller/postgrescluster/delete.go +++ b/internal/controller/postgrescluster/delete.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/helpers_test.go b/internal/controller/postgrescluster/helpers_test.go index 7e9d6af0b0..0536b466d4 100644 --- a/internal/controller/postgrescluster/helpers_test.go +++ b/internal/controller/postgrescluster/helpers_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -22,6 +11,7 @@ import ( "testing" "time" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,7 +22,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" - "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -63,11 +53,6 @@ func init() { } } -// marshalMatches converts actual to YAML and compares that to expected. -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - return cmp.MarshalMatches(actual, expected) -} - // setupKubernetes starts or connects to a Kubernetes API and returns a client // that uses it. See [require.Kubernetes] for more details. func setupKubernetes(t testing.TB) (*rest.Config, client.Client) { @@ -109,13 +94,14 @@ func testVolumeClaimSpec() corev1.PersistentVolumeClaimSpec { // Defines a volume claim spec that can be used to create instances return corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, }, } } + func testCluster() *v1beta1.PostgresCluster { // Defines a base cluster spec that can be used by tests to generate a // cluster with an expected number of instances @@ -155,18 +141,75 @@ func testCluster() *v1beta1.PostgresCluster { return cluster.DeepCopy() } +func testBackupJob(cluster *v1beta1.PostgresCluster) *batchv1.Job { + job := batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + APIVersion: batchv1.SchemeGroupVersion.String(), + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "backup-job-1", + Namespace: cluster.Namespace, + Labels: map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelPGBackRestBackup: "", + naming.LabelPGBackRestRepo: "repo1", + }, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "test", Image: "test"}}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + } + + return job.DeepCopy() +} + +func testRestoreJob(cluster *v1beta1.PostgresCluster) *batchv1.Job { + job := batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + APIVersion: batchv1.SchemeGroupVersion.String(), + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "restore-job-1", + Namespace: cluster.Namespace, + Labels: naming.PGBackRestRestoreJobLabels(cluster.Name), + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "test", Image: "test"}}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + } + + return job.DeepCopy() +} + // setupManager creates the runtime manager used during controller testing func setupManager(t *testing.T, cfg *rest.Config, controllerSetup func(mgr manager.Manager)) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + + // Disable health endpoints + options := 
runtime.Options{} + options.HealthProbeBindAddress = "0" + options.Metrics.BindAddress = "0" - mgr, err := runtime.CreateRuntimeManager("", cfg, true) + mgr, err := runtime.NewManager(cfg, options) if err != nil { t.Fatal(err) } controllerSetup(mgr) - ctx, cancel := context.WithCancel(context.Background()) go func() { if err := mgr.Start(ctx); err != nil { t.Error(err) diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 0abe3ada9b..66321cc738 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -29,6 +18,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -38,6 +28,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -209,7 +201,7 @@ type observedInstances struct { byName map[string]*Instance bySet map[string][]*Instance forCluster []*Instance - setNames sets.String + setNames sets.Set[string] } // newObservedInstances builds an observedInstances from Kubernetes API objects. @@ -221,7 +213,7 @@ func newObservedInstances( observed := observedInstances{ byName: make(map[string]*Instance), bySet: make(map[string][]*Instance), - setNames: make(sets.String), + setNames: make(sets.Set[string]), } sets := make(map[string]*v1beta1.PostgresInstanceSetSpec) @@ -302,6 +294,8 @@ func (r *Reconciler) observeInstances( pods := &corev1.PodList{} runners := &appsv1.StatefulSetList{} + autogrow := feature.Enabled(ctx, feature.AutoGrowVolumes) + selector, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) if err == nil { err = errors.WithStack( @@ -320,13 +314,28 @@ func (r *Reconciler) observeInstances( observed := newObservedInstances(cluster, runners.Items, pods.Items) + // Save desired volume size values in case the status is removed. + // This may happen in cases where the Pod is restarted, the cluster + // is shutdown, etc. Only save values for instances defined in the spec. 
+ previousDesiredRequests := make(map[string]string) + if autogrow { + for _, statusIS := range cluster.Status.InstanceSets { + if statusIS.DesiredPGDataVolume != nil { + for k, v := range statusIS.DesiredPGDataVolume { + previousDesiredRequests[k] = v + } + } + } + } + // Fill out status sorted by set name. cluster.Status.InstanceSets = cluster.Status.InstanceSets[:0] - for _, name := range observed.setNames.List() { + for _, name := range sets.List(observed.setNames) { status := v1beta1.PostgresInstanceSetStatus{Name: name} + status.DesiredPGDataVolume = make(map[string]string) for _, instance := range observed.bySet[name] { - status.Replicas += int32(len(instance.Pods)) + status.Replicas += int32(len(instance.Pods)) //nolint:gosec if ready, known := instance.IsReady(); known && ready { status.ReadyReplicas++ @@ -334,6 +343,26 @@ func (r *Reconciler) observeInstances( if matches, known := instance.PodMatchesPodTemplate(); known && matches { status.UpdatedReplicas++ } + if autogrow { + // Store desired pgData volume size for each instance Pod. + // The 'suggested-pgdata-pvc-size' annotation value is stored in the PostgresCluster + // status so that 1) it is available to the function 'reconcilePostgresDataVolume' + // and 2) so that the value persists after Pod restart and cluster shutdown events. + for _, pod := range instance.Pods { + // don't set an empty status + if pod.Annotations["suggested-pgdata-pvc-size"] != "" { + status.DesiredPGDataVolume[instance.Name] = pod.Annotations["suggested-pgdata-pvc-size"] + } + } + } + } + + // If autogrow is enabled, get the desired volume size for each instance. + if autogrow { + for _, instance := range observed.bySet[name] { + status.DesiredPGDataVolume[instance.Name] = r.storeDesiredRequest(ctx, cluster, + name, status.DesiredPGDataVolume[instance.Name], previousDesiredRequests[instance.Name]) + } } cluster.Status.InstanceSets = append(cluster.Status.InstanceSets, status) @@ -342,6 +371,67 @@ func (r *Reconciler) observeInstances( return observed, err } +// storeDesiredRequest saves the appropriate request value to the PostgresCluster +// status. If the value has grown, create an Event. +func (r *Reconciler) storeDesiredRequest( + ctx context.Context, cluster *v1beta1.PostgresCluster, + instanceSetName, desiredRequest, desiredRequestBackup string, +) string { + var current resource.Quantity + var previous resource.Quantity + var err error + log := logging.FromContext(ctx) + + // Parse the desired request from the cluster's status. + if desiredRequest != "" { + current, err = resource.ParseQuantity(desiredRequest) + if err != nil { + log.Error(err, "Unable to parse pgData volume request from status ("+ + desiredRequest+") for "+cluster.Name+"/"+instanceSetName) + // If there was an error parsing the value, treat as unset (equivalent to zero). + desiredRequest = "" + current, _ = resource.ParseQuantity("") + + } + } + + // Parse the desired request from the status backup. + if desiredRequestBackup != "" { + previous, err = resource.ParseQuantity(desiredRequestBackup) + if err != nil { + log.Error(err, "Unable to parse pgData volume request from status backup ("+ + desiredRequestBackup+") for "+cluster.Name+"/"+instanceSetName) + // If there was an error parsing the value, treat as unset (equivalent to zero). + desiredRequestBackup = "" + previous, _ = resource.ParseQuantity("") + + } + } + + // Determine if the limit is set for this instance set. 
+ var limitSet bool + for _, specInstance := range cluster.Spec.InstanceSets { + if specInstance.Name == instanceSetName { + limitSet = !specInstance.DataVolumeClaimSpec.Resources.Limits.Storage().IsZero() + } + } + + if limitSet && current.Value() > previous.Value() { + r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeAutoGrow", + "pgData volume expansion to %v requested for %s/%s.", + current.String(), cluster.Name, instanceSetName) + } + + // If the desired size was not observed, update with previously stored value. + // This can happen in scenarios where the annotation on the Pod is missing + // such as when the cluster is shutdown or a Pod is in the middle of a restart. + if desiredRequest == "" { + desiredRequest = desiredRequestBackup + } + + return desiredRequest +} + // +kubebuilder:rbac:groups="",resources="pods",verbs={list} // +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={patch} @@ -402,7 +492,7 @@ func (r *Reconciler) deleteInstances( // mistake that something else is deleting objects. Use RequeueAfter to // avoid being rate-limited due to a deluge of delete events. if err != nil { - result.RequeueAfter = 10 * time.Second + result = runtime.RequeueWithoutBackoff(10 * time.Second) } return client.IgnoreNotFound(err) } @@ -503,6 +593,7 @@ func (r *Reconciler) reconcileInstanceSets( primaryCertificate *corev1.SecretProjection, clusterVolumes []corev1.PersistentVolumeClaim, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, + backupsSpecFound bool, ) error { // Go through the observed instances and check if a primary has been determined. @@ -539,7 +630,9 @@ func (r *Reconciler) reconcileInstanceSets( rootCA, clusterPodService, instanceServiceAccount, patroniLeaderService, primaryCertificate, findAvailableInstanceNames(*set, instances, clusterVolumes), - numInstancePods, clusterVolumes, exporterQueriesConfig, exporterWebConfig) + numInstancePods, clusterVolumes, exporterQueriesConfig, exporterWebConfig, + backupsSpecFound, + ) if err == nil { err = r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, set) @@ -591,7 +684,7 @@ func (r *Reconciler) cleanupPodDisruptionBudgets( } if err == nil { - setNames := sets.String{} + setNames := sets.Set[string]{} for _, set := range cluster.Spec.InstanceSets { setNames.Insert(set.Name) } @@ -692,7 +785,7 @@ func (r *Reconciler) rolloutInstance( pod := instance.Pods[0] exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) 
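The storeDesiredRequest helper added above reduces to a small decision: parse the size observed on the Pod annotation and the size previously recorded in status as resource quantities, emit a VolumeAutoGrow event only when a storage limit is configured and the observed value grew, and fall back to the previously recorded value when nothing was observed (for example after a Pod restart or cluster shutdown). A self-contained sketch of that logic follows; resolveDesiredRequest and the fmt-based event placeholder are illustrative stand-ins, not the operator's API.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// resolveDesiredRequest mirrors the decision made by storeDesiredRequest:
// given the size observed on the Pod annotation and the size previously
// stored in status, return the value to record in status.
func resolveDesiredRequest(limitSet bool, observed, previous string) string {
	parse := func(s string) (resource.Quantity, bool) {
		q, err := resource.ParseQuantity(s)
		return q, err == nil
	}

	current, okCurrent := parse(observed)
	prior, okPrior := parse(previous)

	// Unparseable values are treated as unset (zero), as in the helper above.
	if !okCurrent {
		observed = ""
	}
	if !okPrior {
		previous = ""
	}

	// Only announce growth when a storage limit is configured and the
	// observed request is larger than what was recorded before.
	if limitSet && current.Cmp(prior) > 0 {
		fmt.Printf("would emit VolumeAutoGrow event: expansion to %s requested\n", current.String())
	}

	// When nothing was observed, keep the previously stored value
	// instead of losing it across restarts and shutdowns.
	if observed == "" {
		observed = previous
	}
	return observed
}

func main() {
	fmt.Println(resolveDesiredRequest(true, "2Gi", "1Gi")) // event, then "2Gi"
	fmt.Println(resolveDesiredRequest(true, "", "1Gi"))    // no event, falls back to "1Gi"
}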
} primary, known := instance.IsPrimary() @@ -978,6 +1071,7 @@ func (r *Reconciler) scaleUpInstances( numInstancePods int, clusterVolumes []corev1.PersistentVolumeClaim, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, + backupsSpecFound bool, ) ([]*appsv1.StatefulSet, error) { log := logging.FromContext(ctx) @@ -1022,6 +1116,7 @@ func (r *Reconciler) scaleUpInstances( rootCA, clusterPodService, instanceServiceAccount, patroniLeaderService, primaryCertificate, instances[i], numInstancePods, clusterVolumes, exporterQueriesConfig, exporterWebConfig, + backupsSpecFound, ) } if err == nil { @@ -1051,6 +1146,7 @@ func (r *Reconciler) reconcileInstance( numInstancePods int, clusterVolumes []corev1.PersistentVolumeClaim, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, + backupsSpecFound bool, ) error { log := logging.FromContext(ctx).WithValues("instance", instance.Name) ctx = logging.NewContext(ctx, log) @@ -1082,7 +1178,7 @@ func (r *Reconciler) reconcileInstance( ctx, cluster, spec, instance, rootCA) } if err == nil { - postgresDataVolume, err = r.reconcilePostgresDataVolume(ctx, cluster, spec, instance, clusterVolumes) + postgresDataVolume, err = r.reconcilePostgresDataVolume(ctx, cluster, spec, instance, clusterVolumes, nil) } if err == nil { postgresWALVolume, err = r.reconcilePostgresWALVolume(ctx, cluster, spec, instance, observed, clusterVolumes) @@ -1097,8 +1193,10 @@ func (r *Reconciler) reconcileInstance( postgresDataVolume, postgresWALVolume, tablespaceVolumes, &instance.Spec.Template.Spec) - addPGBackRestToInstancePodSpec( - cluster, instanceCertificates, &instance.Spec.Template.Spec) + if backupsSpecFound { + addPGBackRestToInstancePodSpec( + ctx, cluster, instanceCertificates, &instance.Spec.Template.Spec) + } err = patroni.InstancePod( ctx, cluster, clusterConfigMap, clusterPodService, patroniLeaderService, @@ -1107,7 +1205,7 @@ func (r *Reconciler) reconcileInstance( // Add pgMonitor resources to the instance Pod spec if err == nil { - err = addPGMonitorToInstancePodSpec(cluster, &instance.Spec.Template, exporterQueriesConfig, exporterWebConfig) + err = addPGMonitorToInstancePodSpec(ctx, cluster, &instance.Spec.Template, exporterQueriesConfig, exporterWebConfig) } // add nss_wrapper init container and add nss_wrapper env vars to the database and pgbackrest @@ -1200,15 +1298,11 @@ func generateInstanceStatefulSetIntent(_ context.Context, sts.Spec.Template.Spec.Affinity = spec.Affinity sts.Spec.Template.Spec.Tolerations = spec.Tolerations sts.Spec.Template.Spec.TopologySpreadConstraints = spec.TopologySpreadConstraints - if spec.PriorityClassName != nil { - sts.Spec.Template.Spec.PriorityClassName = *spec.PriorityClassName - } + sts.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(spec.PriorityClassName) // if default pod scheduling is not explicitly disabled, add the default // pod topology spread constraints - if cluster.Spec.DisableDefaultPodScheduling == nil || - (cluster.Spec.DisableDefaultPodScheduling != nil && - !*cluster.Spec.DisableDefaultPodScheduling) { + if !initialize.FromPointer(cluster.Spec.DisableDefaultPodScheduling) { sts.Spec.Template.Spec.TopologySpreadConstraints = append( sts.Spec.Template.Spec.TopologySpreadConstraints, defaultTopologySpreadConstraints( @@ -1271,13 +1365,12 @@ func generateInstanceStatefulSetIntent(_ context.Context, // addPGBackRestToInstancePodSpec adds pgBackRest configurations and sidecars // to the PodSpec. 
-func addPGBackRestToInstancePodSpec(cluster *v1beta1.PostgresCluster, +func addPGBackRestToInstancePodSpec( + ctx context.Context, cluster *v1beta1.PostgresCluster, instanceCertificates *corev1.Secret, instancePod *corev1.PodSpec, ) { - if pgbackrest.DedicatedRepoHostEnabled(cluster) { - pgbackrest.AddServerToInstancePod(cluster, instancePod, - instanceCertificates.Name) - } + pgbackrest.AddServerToInstancePod(ctx, cluster, instancePod, + instanceCertificates.Name) pgbackrest.AddConfigToInstancePod(cluster, instancePod) } diff --git a/internal/controller/postgrescluster/instance.md b/internal/controller/postgrescluster/instance.md index 933ca9bbe3..f0de4c5d7a 100644 --- a/internal/controller/postgrescluster/instance.md +++ b/internal/controller/postgrescluster/instance.md @@ -1,16 +1,7 @@ ## Shutdown and Startup Logic Detail diff --git a/internal/controller/postgrescluster/instance_rollout_test.go b/internal/controller/postgrescluster/instance_rollout_test.go index 30e680c3e0..e668907497 100644 --- a/internal/controller/postgrescluster/instance_rollout_test.go +++ b/internal/controller/postgrescluster/instance_rollout_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -75,7 +64,7 @@ func TestReconcilerRolloutInstance(t *testing.T) { execCalls := 0 reconciler.PodExec = func( - namespace, pod, container string, stdin io.Reader, _, _ io.Writer, command ...string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, _, _ io.Writer, command ...string, ) error { execCalls++ @@ -134,7 +123,7 @@ func TestReconcilerRolloutInstance(t *testing.T) { reconciler := &Reconciler{} reconciler.Tracer = otel.Tracer(t.Name()) reconciler.PodExec = func( - namespace, pod, container string, _ io.Reader, stdout, _ io.Writer, command ...string, + ctx context.Context, namespace, pod, container string, _ io.Reader, stdout, _ io.Writer, command ...string, ) error { execCalls++ @@ -162,7 +151,7 @@ func TestReconcilerRolloutInstance(t *testing.T) { reconciler := &Reconciler{} reconciler.Tracer = otel.Tracer(t.Name()) reconciler.PodExec = func( - _, _, _ string, _ io.Reader, _, _ io.Writer, _ ...string, + ctx context.Context, _, _, _ string, _ io.Reader, _, _ io.Writer, _ ...string, ) error { // Nothing useful in stdout. return nil diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index 06e38c055b..f7f59f50a5 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -24,6 +13,7 @@ import ( "testing" "time" + "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" "github.com/pkg/errors" "go.opentelemetry.io/otel" @@ -38,15 +28,19 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -184,7 +178,7 @@ func TestNewObservedInstances(t *testing.T) { // Lookup based on its labels. assert.Equal(t, observed.byName["the-name"], instance) assert.DeepEqual(t, observed.bySet["missing"], []*Instance{instance}) - assert.DeepEqual(t, observed.setNames.List(), []string{"missing"}) + assert.DeepEqual(t, sets.List(observed.setNames), []string{"missing"}) }) t.Run("RunnerMissingOthers", func(t *testing.T) { @@ -217,7 +211,7 @@ func TestNewObservedInstances(t *testing.T) { // Lookup based on its name and labels. assert.Equal(t, observed.byName["the-name"], instance) assert.DeepEqual(t, observed.bySet["missing"], []*Instance{instance}) - assert.DeepEqual(t, observed.setNames.List(), []string{"missing"}) + assert.DeepEqual(t, sets.List(observed.setNames), []string{"missing"}) }) t.Run("Matching", func(t *testing.T) { @@ -262,7 +256,122 @@ func TestNewObservedInstances(t *testing.T) { // Lookup based on its name and labels. 
assert.Equal(t, observed.byName["the-name"], instance) assert.DeepEqual(t, observed.bySet["00"], []*Instance{instance}) - assert.DeepEqual(t, observed.setNames.List(), []string{"00"}) + assert.DeepEqual(t, sets.List(observed.setNames), []string{"00"}) + }) +} + +func TestStoreDesiredRequest(t *testing.T) { + ctx := context.Background() + + setupLogCapture := func(ctx context.Context) (context.Context, *[]string) { + calls := []string{} + testlog := funcr.NewJSON(func(object string) { + calls = append(calls, object) + }, funcr.Options{ + Verbosity: 1, + }) + return logging.NewContext(ctx, testlog), &calls + } + + cluster := v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rhino", + Namespace: "test-namespace", + }, + Spec: v1beta1.PostgresClusterSpec{ + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "red", + Replicas: initialize.Int32(1), + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }}}, + }, { + Name: "blue", + Replicas: initialize.Int32(1), + }}}} + + t.Run("BadRequestNoBackup", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "red", "woot", "") + + assert.Equal(t, value, "") + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse pgData volume request from status")) + }) + + t.Run("BadRequestWithBackup", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "red", "foo", "1Gi") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse pgData volume request from status (foo) for rhino/red")) + }) + + t.Run("NoLimitNoEvent", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "blue", "1Gi", "") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 0) + }) + + t.Run("BadBackupRequest", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "red", "2Gi", "bar") + + assert.Equal(t, value, "2Gi") + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse pgData volume request from status backup (bar) for rhino/red")) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeAutoGrow") + assert.Equal(t, recorder.Events[0].Note, "pgData volume expansion to 2Gi requested for rhino/red.") + }) + + t.Run("ValueUpdateWithEvent", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := 
reconciler.storeDesiredRequest(ctx, &cluster, "red", "1Gi", "") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeAutoGrow") + assert.Equal(t, recorder.Events[0].Note, "pgData volume expansion to 1Gi requested for rhino/red.") + }) + + t.Run("NoLimitNoEvent", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + value := reconciler.storeDesiredRequest(ctx, &cluster, "blue", "1Gi", "") + + assert.Equal(t, value, "1Gi") + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 0) }) } @@ -415,8 +524,9 @@ func TestWritablePod(t *testing.T) { } func TestAddPGBackRestToInstancePodSpec(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.TablespaceVolumes+"=false"))) + t.Parallel() + ctx := context.Background() cluster := v1beta1.PostgresCluster{} cluster.Name = "hippo" cluster.Default() @@ -441,14 +551,14 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { cluster.Spec.Backups.PGBackRest.Repos = nil out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(cluster, &certificates, out) + addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) // Only Containers and Volumes fields have changed. assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // Only database container has mounts. // Other containers are ignored. - assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: database resources: {} volumeMounts: @@ -457,14 +567,104 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { readOnly: true - name: other resources: {} +- command: + - pgbackrest + - server + livenessProbe: + exec: + command: + - pgbackrest + - server-ping + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /pgdata + name: postgres-data + - mountPath: /pgwal + name: postgres-wal + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true +- command: + - bash + - -ceu + - -- + - |- + monitor() { + exec {fd}<> <(:||:) + until read -r -t 5 -u "${fd}"; do + if + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --dereference --format='Loaded configuration dated %y' "${filename}" + elif + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] + } && + pkill -HUP --exact --parent=0 pgbackrest + then + exec {fd}>&- && exec {fd}<> <(:||:) + stat --format='Loaded certificates dated %y' "${directory}" + fi + done + }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor + - pgbackrest-config + - /etc/pgbackrest/server + - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt + - /etc/pgbackrest/conf.d/~postgres-operator_server.conf + name: pgbackrest-config + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + 
runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/server + name: pgbackrest-server + readOnly: true + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true `)) - // Instance configuration files but no certificates. + // Instance configuration files with certificates. // Other volumes are ignored. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: other - name: postgres-data - name: postgres-wal +- name: pgbackrest-server + projected: + sources: + - secret: + items: + - key: pgbackrest-server.crt + path: server-tls.crt + - key: pgbackrest-server.key + mode: 384 + path: server-tls.key + name: some-secret - name: pgbackrest-config projected: sources: @@ -474,7 +674,19 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest `)) }) @@ -486,7 +698,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { // Instance configuration files plus client and server certificates. // The server certificate comes from the instance Secret. // Other volumes are untouched. - assert.Assert(t, marshalMatches(result.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(result.Volumes, ` - name: other - name: postgres-data - name: postgres-wal @@ -523,7 +735,6 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest - optional: true `)) } @@ -536,12 +747,12 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { } out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(cluster, &certificates, out) + addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) alwaysExpect(t, out) // The TLS server is added and configuration mounted. // It has PostgreSQL volumes mounted while other volumes are ignored. 
- assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: database resources: {} volumeMounts: @@ -587,21 +798,21 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -648,7 +859,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { before := out.DeepCopy() out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(cluster, &certificates, out) + addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) alwaysExpect(t, out) // Only the TLS server container changed. @@ -657,7 +868,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { assert.DeepEqual(t, before.Containers[:2], out.Containers[:2]) // It has the custom resources. - assert.Assert(t, marshalMatches(out.Containers[2:], ` + assert.Assert(t, cmp.MarshalMatches(out.Containers[2:], ` - command: - pgbackrest - server @@ -699,21 +910,21 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -1132,9 +1343,6 @@ func TestDeleteInstance(t *testing.T) { Tracer: otel.Tracer(t.Name()), } - // Initialize the feature gate - assert.NilError(t, util.AddAndSetFeatureGates("")) - // Define, Create, and Reconcile a cluster to get an instance running in kube cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -1185,8 +1393,9 @@ func TestDeleteInstance(t *testing.T) { for _, gvk := range gvks { t.Run(gvk.Kind, func(t *testing.T) { - uList := &unstructured.UnstructuredList{} - err := wait.Poll(time.Second*3, Scale(time.Second*30), func() (bool, error) { + ctx := context.Background() + err := wait.PollUntilContextTimeout(ctx, time.Second*3, Scale(time.Second*30), false, func(ctx context.Context) (bool, error) { + uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) assert.NilError(t, errors.WithStack(reconciler.Client.List(ctx, uList, client.InNamespace(cluster.Namespace), @@ -1356,7 +1565,7 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { name: "check 
default scheduling constraints are added", run: func(t *testing.T, ss *appsv1.StatefulSet) { assert.Equal(t, len(ss.Spec.Template.Spec.TopologySpreadConstraints), 2) - assert.Assert(t, marshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` + assert.Assert(t, cmp.MarshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` - labelSelector: matchExpressions: - key: postgres-operator.crunchydata.com/data @@ -1403,7 +1612,7 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { }, run: func(t *testing.T, ss *appsv1.StatefulSet) { assert.Equal(t, len(ss.Spec.Template.Spec.TopologySpreadConstraints), 3) - assert.Assert(t, marshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` + assert.Assert(t, cmp.MarshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, ` - labelSelector: matchExpressions: - key: postgres-operator.crunchydata.com/cluster @@ -1486,7 +1695,7 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { }, run: func(t *testing.T, ss *appsv1.StatefulSet) { assert.Equal(t, len(ss.Spec.Template.Spec.TopologySpreadConstraints), 1) - assert.Assert(t, marshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, + assert.Assert(t, cmp.MarshalMatches(ss.Spec.Template.Spec.TopologySpreadConstraints, `- labelSelector: matchExpressions: - key: postgres-operator.crunchydata.com/cluster @@ -1763,7 +1972,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name spec := &cluster.Spec.InstanceSets[0] - spec.MinAvailable = initialize.IntOrStringInt32(0) + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, !foundPDB(cluster, spec)) }) @@ -1772,7 +1981,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name spec := &cluster.Spec.InstanceSets[0] - spec.MinAvailable = initialize.IntOrStringInt32(1) + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -1781,7 +1990,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { assert.Assert(t, foundPDB(cluster, spec)) t.Run("deleted", func(t *testing.T) { - spec.MinAvailable = initialize.IntOrStringInt32(0) + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) err := r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update @@ -1799,7 +2008,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name spec := &cluster.Spec.InstanceSets[0] - spec.MinAvailable = initialize.IntOrStringString("50%") + spec.MinAvailable = initialize.Pointer(intstr.FromString("50%")) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -1808,7 +2017,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { assert.Assert(t, foundPDB(cluster, spec)) t.Run("deleted", func(t *testing.T) { - spec.MinAvailable = initialize.IntOrStringString("0%") + spec.MinAvailable = initialize.Pointer(intstr.FromString("0%")) err := r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update @@ -1822,13 
+2031,13 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { }) t.Run("delete with 00%", func(t *testing.T) { - spec.MinAvailable = initialize.IntOrStringString("50%") + spec.MinAvailable = initialize.Pointer(intstr.FromString("50%")) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, foundPDB(cluster, spec)) t.Run("deleted", func(t *testing.T) { - spec.MinAvailable = initialize.IntOrStringString("00%") + spec.MinAvailable = initialize.Pointer(intstr.FromString("00%")) err := r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update @@ -1901,13 +2110,13 @@ func TestCleanupDisruptionBudgets(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name spec := &cluster.Spec.InstanceSets[0] - spec.MinAvailable = initialize.IntOrStringInt32(1) + spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) expectedPDB := generatePDB(t, cluster, spec, - initialize.IntOrStringInt32(1)) + initialize.Pointer(intstr.FromInt32(1))) assert.NilError(t, createPDB(expectedPDB)) t.Run("no instances were removed", func(t *testing.T) { @@ -1920,7 +2129,7 @@ func TestCleanupDisruptionBudgets(t *testing.T) { leftoverPDB := generatePDB(t, cluster, &v1beta1.PostgresInstanceSetSpec{ Name: "old-instance", Replicas: initialize.Int32(1), - }, initialize.IntOrStringInt32(1)) + }, initialize.Pointer(intstr.FromInt32(1))) assert.NilError(t, createPDB(leftoverPDB)) assert.Assert(t, foundPDB(expectedPDB)) diff --git a/internal/controller/postgrescluster/patroni.go b/internal/controller/postgrescluster/patroni.go index d6be469a2b..1c5ac93eed 100644 --- a/internal/controller/postgrescluster/patroni.go +++ b/internal/controller/postgrescluster/patroni.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -26,7 +15,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" @@ -103,7 +91,7 @@ func (r *Reconciler) handlePatroniRestarts( ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { pod := primaryNeedsRestart.Pods[0] - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) 
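The MinAvailable and PriorityClassName hunks above rely on two small helpers from internal/initialize: Pointer, which returns the address of a value such as intstr.FromInt32(1), and FromPointer, which dereferences a possibly-nil pointer into the type's zero value. The sketch below shows the behavior those call sites imply; it is an illustration of that behavior, not the package's actual source.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// Pointer returns a pointer to a copy of v.
func Pointer[T any](v T) *T { return &v }

// FromPointer returns the value p points to, or the zero value of T when p is nil.
func FromPointer[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

func main() {
	// Replaces helpers like IntOrStringInt32: build the value, then take its address.
	minAvailable := Pointer(intstr.FromInt32(1))
	fmt.Println(minAvailable.IntValue()) // 1

	// Replaces the `if p != nil { x = *p }` pattern: nil simply yields "".
	var priorityClassName *string
	fmt.Printf("%q\n", FromPointer(priorityClassName)) // ""
}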
}) return errors.WithStack(exec.RestartPendingMembers(ctx, "master", naming.PatroniScope(cluster))) @@ -128,7 +116,7 @@ func (r *Reconciler) handlePatroniRestarts( ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { pod := replicaNeedsRestart.Pods[0] - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) }) return errors.WithStack(exec.RestartPendingMembers(ctx, "replica", naming.PatroniScope(cluster))) @@ -212,8 +200,8 @@ func (r *Reconciler) reconcilePatroniDynamicConfiguration( // NOTE(cbandy): Despite the guards above, calling PodExec may still fail // due to a missing or stopped container. - exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + exec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) } var configuration map[string]any @@ -286,6 +274,8 @@ func (r *Reconciler) generatePatroniLeaderLeaseService( } servicePort.NodePort = *spec.NodePort } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy } service.Spec.Ports = []corev1.ServicePort{servicePort} @@ -318,8 +308,8 @@ func (r *Reconciler) reconcilePatroniLeaderLease( func (r *Reconciler) reconcilePatroniStatus( ctx context.Context, cluster *v1beta1.PostgresCluster, observedInstances *observedInstances, -) (reconcile.Result, error) { - result := reconcile.Result{} +) (time.Duration, error) { + var requeue time.Duration log := logging.FromContext(ctx) var readyInstance bool @@ -346,12 +336,11 @@ func (r *Reconciler) reconcilePatroniStatus( // is detected in the cluster we assume this is the case, and simply log a message and // requeue in order to try again until the expected value is found. log.Info("detected ready instance but no initialize value") - result.RequeueAfter = 1 * time.Second - return result, nil + requeue = time.Second } } - return result, err + return requeue, err } // reconcileReplicationSecret creates a secret containing the TLS @@ -535,7 +524,7 @@ func (r *Reconciler) reconcilePatroniSwitchover(ctx context.Context, } exec := func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(runningPod.Namespace, runningPod.Name, naming.ContainerDatabase, stdin, + return r.PodExec(ctx, runningPod.Namespace, runningPod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) } diff --git a/internal/controller/postgrescluster/patroni_test.go b/internal/controller/postgrescluster/patroni_test.go index c6c82c53b8..b2a457685b 100644 --- a/internal/controller/postgrescluster/patroni_test.go +++ b/internal/controller/postgrescluster/patroni_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -34,10 +23,10 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -57,11 +46,11 @@ func TestGeneratePatroniLeaderLeaseService(t *testing.T) { cluster.Spec.Port = initialize.Int32(9876) alwaysExpect := func(t testing.TB, service *corev1.Service) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg2 @@ -89,7 +78,7 @@ ownerReferences: alwaysExpect(t, service) // Defaults to ClusterIP. assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres port: 9876 protocol: TCP @@ -178,7 +167,7 @@ ownerReferences: assert.NilError(t, err) alwaysExpect(t, service) test.Expect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres port: 9876 protocol: TCP @@ -203,7 +192,7 @@ ownerReferences: assert.NilError(t, err) alwaysExpect(t, service) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres nodePort: 32001 port: 9876 @@ -216,7 +205,7 @@ ownerReferences: assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) assert.NilError(t, err) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: postgres nodePort: 32002 port: 9876 @@ -524,13 +513,13 @@ func TestReconcilePatroniStatus(t *testing.T) { t.Run(fmt.Sprintf("%+v", tc), func(t *testing.T) { postgresCluster, observedInstances := createResources(i, tc.readyReplicas, tc.writeAnnotation) - result, err := r.reconcilePatroniStatus(ctx, postgresCluster, observedInstances) + requeue, err := r.reconcilePatroniStatus(ctx, postgresCluster, observedInstances) if tc.requeueExpected { assert.NilError(t, err) - assert.Assert(t, result.RequeueAfter == 1*time.Second) + assert.Equal(t, requeue, time.Second) } else { assert.NilError(t, err) - assert.DeepEqual(t, result, reconcile.Result{}) + assert.Equal(t, requeue, time.Duration(0)) } }) } @@ -544,7 +533,7 @@ func 
TestReconcilePatroniSwitchover(t *testing.T) { var timelineCallNoLeader, timelineCall bool r := Reconciler{ Client: client, - PodExec: func(namespace, pod, container string, + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { called = true switch { diff --git a/internal/controller/postgrescluster/pgadmin.go b/internal/controller/postgrescluster/pgadmin.go index 0d7065e7ac..c0a936ba1f 100644 --- a/internal/controller/postgrescluster/pgadmin.go +++ b/internal/controller/postgrescluster/pgadmin.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -169,7 +158,7 @@ func (r *Reconciler) generatePGAdminService( // requires updates to the pgAdmin service configuration. servicePort := corev1.ServicePort{ Name: naming.PortPGAdmin, - Port: *initialize.Int32(5050), + Port: 5050, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(naming.PortPGAdmin), } @@ -192,6 +181,8 @@ func (r *Reconciler) generatePGAdminService( } servicePort.NodePort = *spec.NodePort } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy } service.Spec.Ports = []corev1.ServicePort{servicePort} @@ -303,11 +294,8 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( // Use scheduling constraints from the cluster spec. sts.Spec.Template.Spec.Affinity = cluster.Spec.UserInterface.PGAdmin.Affinity sts.Spec.Template.Spec.Tolerations = cluster.Spec.UserInterface.PGAdmin.Tolerations - - if cluster.Spec.UserInterface.PGAdmin.PriorityClassName != nil { - sts.Spec.Template.Spec.PriorityClassName = *cluster.Spec.UserInterface.PGAdmin.PriorityClassName - } - + sts.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(cluster.Spec.UserInterface.PGAdmin.PriorityClassName) sts.Spec.Template.Spec.TopologySpreadConstraints = cluster.Spec.UserInterface.PGAdmin.TopologySpreadConstraints @@ -454,9 +442,9 @@ func (r *Reconciler) reconcilePGAdminUsers( ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) 
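Several hunks in this patch make the same mechanical change: PodExec now takes a context.Context as its first argument, and each exec closure forwards the context it receives instead of discarding it, so cancellation and deadlines reach the remote command. A minimal sketch of that shape, with made-up namespace and pod names:

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// podExecFunc mirrors the updated PodExec signature: the caller's context
// is the first argument so it propagates into the remote exec call.
type podExecFunc func(
	ctx context.Context, namespace, pod, container string,
	stdin io.Reader, stdout, stderr io.Writer, command ...string,
) error

func main() {
	// A stand-in PodExec that only records what it was asked to run.
	var podExec podExecFunc = func(
		ctx context.Context, namespace, pod, container string,
		stdin io.Reader, stdout, stderr io.Writer, command ...string,
	) error {
		fmt.Printf("exec in %s/%s [%s]: %s\n", namespace, pod, container, strings.Join(command, " "))
		return ctx.Err()
	}

	// Closures handed to the pgBackRest and Patroni executors now forward ctx,
	// matching the `exec := func(ctx, ...)` call sites in the hunks above.
	exec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error {
		return podExec(ctx, "postgres-operator", "hippo-instance1-abcd-0", "database",
			stdin, stdout, stderr, command...)
	}

	_ = exec(context.Background(), nil, nil, nil, "pgbackrest", "server-ping")
}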
} } if podExecutor == nil { diff --git a/internal/controller/postgrescluster/pgadmin_test.go b/internal/controller/postgrescluster/pgadmin_test.go index 5a2a3efb27..92ec6f42f1 100644 --- a/internal/controller/postgrescluster/pgadmin_test.go +++ b/internal/controller/postgrescluster/pgadmin_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -152,7 +141,7 @@ func TestGeneratePGAdminService(t *testing.T) { assert.NilError(t, err) assert.Assert(t, !specified) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null name: my-cluster-pgadmin namespace: my-ns @@ -165,11 +154,11 @@ namespace: my-ns } alwaysExpect := func(t testing.TB, service *corev1.Service) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: my-cluster @@ -263,7 +252,7 @@ ownerReferences: alwaysExpect(t, service) // Defaults to ClusterIP. 
assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgadmin port: 5050 protocol: TCP @@ -296,7 +285,7 @@ ownerReferences: assert.Assert(t, specified) alwaysExpect(t, service) test.Expect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgadmin port: 5050 protocol: TCP @@ -321,7 +310,7 @@ ownerReferences: assert.NilError(t, err) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgadmin nodePort: 32001 port: 5050 @@ -334,7 +323,7 @@ ownerReferences: assert.NilError(t, err) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgadmin nodePort: 32002 port: 5050 @@ -698,7 +687,7 @@ func TestReconcilePGAdminDataVolume(t *testing.T) { assert.Equal(t, pvc.Labels[naming.LabelRole], naming.RolePGAdmin) assert.Equal(t, pvc.Labels[naming.LabelData], naming.DataPGAdmin) - assert.Assert(t, marshalMatches(pvc.Spec, ` + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` accessModes: - ReadWriteOnce resources: @@ -753,6 +742,10 @@ func TestReconcilePGAdminUsers(t *testing.T) { t.Run("PodTerminating", func(t *testing.T) { pod := pod.DeepCopy() + // Must add finalizer when adding deletion timestamp otherwise fake client will panic: + // https://github.com/kubernetes-sigs/controller-runtime/pull/2316 + pod.Finalizers = append(pod.Finalizers, "some-finalizer") + pod.DeletionTimestamp = new(metav1.Time) *pod.DeletionTimestamp = metav1.Now() pod.Status.ContainerStatuses = @@ -781,7 +774,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { calls := 0 r.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -859,7 +852,7 @@ func pgAdminTestCluster(ns corev1.Namespace) *v1beta1.PostgresCluster { Volume: &v1beta1.RepoPVC{ VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -874,7 +867,7 @@ func pgAdminTestCluster(ns corev1.Namespace) *v1beta1.PostgresCluster { Image: "test-image", DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index dcf903631d..836df047fc 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -19,6 +8,7 @@ import ( "context" "fmt" "io" + "reflect" "regexp" "sort" "strings" @@ -33,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -43,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -117,11 +107,14 @@ var regexRepoIndex = regexp.MustCompile(`\d+`) // RepoResources is used to store various resources for pgBackRest repositories and // repository hosts type RepoResources struct { + hosts []*appsv1.StatefulSet cronjobs []*batchv1.CronJob manualBackupJobs []*batchv1.Job replicaCreateBackupJobs []*batchv1.Job - hosts []*appsv1.StatefulSet pvcs []*corev1.PersistentVolumeClaim + sas []*corev1.ServiceAccount + roles []*rbacv1.Role + rolebindings []*rbacv1.RoleBinding } // applyRepoHostIntent ensures the pgBackRest repository host StatefulSet is synchronized with the @@ -134,7 +127,7 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v repoHostName string, repoResources *RepoResources, observedInstances *observedInstances) (*appsv1.StatefulSet, error) { - repo, err := r.generateRepoHostIntent(postgresCluster, repoHostName, repoResources, observedInstances) + repo, err := r.generateRepoHostIntent(ctx, postgresCluster, repoHostName, repoResources, observedInstances) if err != nil { return nil, err } @@ -192,24 +185,44 @@ func (r *Reconciler) applyRepoVolumeIntent(ctx context.Context, return repo, nil } +// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={list} +// +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={list} +// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={list} +// +kubebuilder:rbac:groups="",resources="configmaps",verbs={list} +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={list} +// +kubebuilder:rbac:groups="",resources="secrets",verbs={list} +// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={list} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={list} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={list} + // getPGBackRestResources returns the existing pgBackRest resources that should utilized by the // PostgresCluster controller during reconciliation. Any items returned are verified to be owned // by the PostgresCluster controller and still applicable per the current PostgresCluster spec. 
-// Additionally, and resources identified that no longer correspond to any current configuration +// Additionally, any resources identified that no longer correspond to any current configuration // are deleted. func (r *Reconciler) getPGBackRestResources(ctx context.Context, - postgresCluster *v1beta1.PostgresCluster) (*RepoResources, error) { + postgresCluster *v1beta1.PostgresCluster, + backupsSpecFound bool, +) (*RepoResources, error) { repoResources := &RepoResources{} gvks := []schema.GroupVersionKind{{ - Group: corev1.SchemeGroupVersion.Group, - Version: corev1.SchemeGroupVersion.Version, - Kind: "ConfigMapList", + Group: appsv1.SchemeGroupVersion.Group, + Version: appsv1.SchemeGroupVersion.Version, + Kind: "StatefulSetList", + }, { + Group: batchv1.SchemeGroupVersion.Group, + Version: batchv1.SchemeGroupVersion.Version, + Kind: "CronJobList", }, { Group: batchv1.SchemeGroupVersion.Group, Version: batchv1.SchemeGroupVersion.Version, Kind: "JobList", + }, { + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: "ConfigMapList", }, { Group: corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, @@ -219,13 +232,17 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, Version: corev1.SchemeGroupVersion.Version, Kind: "SecretList", }, { - Group: appsv1.SchemeGroupVersion.Group, - Version: appsv1.SchemeGroupVersion.Version, - Kind: "StatefulSetList", + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: "ServiceAccountList", }, { - Group: batchv1.SchemeGroupVersion.Group, - Version: batchv1.SchemeGroupVersion.Version, - Kind: "CronJobList", + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Kind: "RoleList", + }, { + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Kind: "RoleBindingList", }} selector := naming.PGBackRestSelector(postgresCluster.GetName()) @@ -241,7 +258,7 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, continue } - owned, err := r.cleanupRepoResources(ctx, postgresCluster, uList.Items) + owned, err := r.cleanupRepoResources(ctx, postgresCluster, uList.Items, backupsSpecFound) if err != nil { return nil, errors.WithStack(err) } @@ -263,8 +280,11 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, } // +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={delete} +// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={delete} // +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={delete} // +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={delete} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={delete} +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={delete} // cleanupRepoResources cleans up pgBackRest repository resources that should no longer be // reconciled by deleting them. This includes deleting repos (i.e. PersistentVolumeClaims) that @@ -272,7 +292,9 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, // pgBackRest repository host resources if a repository host is no longer configured. 
func (r *Reconciler) cleanupRepoResources(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, - ownedResources []unstructured.Unstructured) ([]unstructured.Unstructured, error) { + ownedResources []unstructured.Unstructured, + backupsSpecFound bool, +) ([]unstructured.Unstructured, error) { // stores the resources that should not be deleted ownedNoDelete := []unstructured.Unstructured{} @@ -287,11 +309,17 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, // spec switch { case hasLabel(naming.LabelPGBackRestConfig): + if !backupsSpecFound { + break + } // Simply add the things we never want to delete (e.g. the pgBackRest configuration) // to the slice and do not delete ownedNoDelete = append(ownedNoDelete, owned) delete = false case hasLabel(naming.LabelPGBackRestDedicated): + if !backupsSpecFound { + break + } // Any resources from before 5.1 that relate to the previously required // SSH configuration should be deleted. // TODO(tjmoore4): This can be removed once 5.0 is EOL. @@ -299,12 +327,13 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, owned.GetName() != naming.PGBackRestSSHSecret(postgresCluster).Name { // If a dedicated repo host resource and a dedicated repo host is enabled, then // add to the slice and do not delete. - if pgbackrest.DedicatedRepoHostEnabled(postgresCluster) { - ownedNoDelete = append(ownedNoDelete, owned) - delete = false - } + ownedNoDelete = append(ownedNoDelete, owned) + delete = false } case hasLabel(naming.LabelPGBackRestRepoVolume): + if !backupsSpecFound { + break + } // If a volume (PVC) is identified for a repo that no longer exists in the // spec then delete it. Otherwise add it to the slice and continue. for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { @@ -317,6 +346,9 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } } case hasLabel(naming.LabelPGBackRestBackup): + if !backupsSpecFound { + break + } // If a Job is identified for a repo that no longer exists in the spec then // delete it. Otherwise add it to the slice and continue. for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { @@ -326,6 +358,9 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } } case hasLabel(naming.LabelPGBackRestCronJob): + if !backupsSpecFound { + break + } for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { if repo.Name == owned.GetLabels()[naming.LabelPGBackRestRepo] { if backupScheduleFound(repo, @@ -337,6 +372,18 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, } } case hasLabel(naming.LabelPGBackRestRestore): + if !backupsSpecFound { + break + } + + // If the restore job has the PGBackRestBackupJobCompletion annotation, it is + // used for volume snapshots and should not be deleted (volume snapshots code + // will clean it up when appropriate). + if _, ok := owned.GetAnnotations()[naming.PGBackRestBackupJobCompletion]; ok { + ownedNoDelete = append(ownedNoDelete, owned) + delete = false + } + // When a cluster is prepared for restore, the system identifier is removed from status // and the cluster is therefore no longer bootstrapped. 
Only once the restore Job is // complete will the cluster then be bootstrapped again, which means by the time we @@ -346,6 +393,12 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, ownedNoDelete = append(ownedNoDelete, owned) delete = false } + case hasLabel(naming.LabelPGBackRest): + if !backupsSpecFound { + break + } + ownedNoDelete = append(ownedNoDelete, owned) + delete = false } // If nothing has specified that the resource should not be deleted, then delete @@ -385,6 +438,24 @@ func unstructuredToRepoResources(kind string, repoResources *RepoResources, uList *unstructured.UnstructuredList) error { switch kind { + case "StatefulSetList": + var stsList appsv1.StatefulSetList + if err := runtime.DefaultUnstructuredConverter. + FromUnstructured(uList.UnstructuredContent(), &stsList); err != nil { + return errors.WithStack(err) + } + for i := range stsList.Items { + repoResources.hosts = append(repoResources.hosts, &stsList.Items[i]) + } + case "CronJobList": + var cronList batchv1.CronJobList + if err := runtime.DefaultUnstructuredConverter. + FromUnstructured(uList.UnstructuredContent(), &cronList); err != nil { + return errors.WithStack(err) + } + for i := range cronList.Items { + repoResources.cronjobs = append(repoResources.cronjobs, &cronList.Items[i]) + } case "JobList": var jobList batchv1.JobList if err := runtime.DefaultUnstructuredConverter. @@ -402,6 +473,9 @@ func unstructuredToRepoResources(kind string, repoResources *RepoResources, append(repoResources.manualBackupJobs, &jobList.Items[i]) } } + case "ConfigMapList": + // Repository host now uses mTLS for encryption, authentication, and authorization. + // Configmaps for SSHD are no longer managed here. case "PersistentVolumeClaimList": var pvcList corev1.PersistentVolumeClaimList if err := runtime.DefaultUnstructuredConverter. @@ -411,34 +485,38 @@ func unstructuredToRepoResources(kind string, repoResources *RepoResources, for i := range pvcList.Items { repoResources.pvcs = append(repoResources.pvcs, &pvcList.Items[i]) } - case "StatefulSetList": - var stsList appsv1.StatefulSetList + case "SecretList": + // Repository host now uses mTLS for encryption, authentication, and authorization. + // Secrets for SSHD are no longer managed here. + // TODO(tjmoore4): Consider adding all pgBackRest secrets to RepoResources to + // observe all pgBackRest secrets in one place. + case "ServiceAccountList": + var saList corev1.ServiceAccountList if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &stsList); err != nil { + FromUnstructured(uList.UnstructuredContent(), &saList); err != nil { return errors.WithStack(err) } - for i := range stsList.Items { - repoResources.hosts = append(repoResources.hosts, &stsList.Items[i]) + for i := range saList.Items { + repoResources.sas = append(repoResources.sas, &saList.Items[i]) } - case "CronJobList": - var cronList batchv1.CronJobList + case "RoleList": + var roleList rbacv1.RoleList if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &cronList); err != nil { + FromUnstructured(uList.UnstructuredContent(), &roleList); err != nil { return errors.WithStack(err) } - for i := range cronList.Items { - repoResources.cronjobs = append(repoResources.cronjobs, &cronList.Items[i]) + for i := range roleList.Items { + repoResources.roles = append(repoResources.roles, &roleList.Items[i]) + } + case "RoleBindingList": + var rb rbacv1.RoleBindingList + if err := runtime.DefaultUnstructuredConverter. 
+ FromUnstructured(uList.UnstructuredContent(), &rb); err != nil { + return errors.WithStack(err) + } + for i := range rb.Items { + repoResources.rolebindings = append(repoResources.rolebindings, &rb.Items[i]) } - case "ConfigMapList": - // Repository host now uses mTLS for encryption, authentication, and authorization. - // Configmaps for SSHD are no longer managed here. - // TODO(tjmoore4): Consider adding all pgBackRest configs to RepoResources to - // observe all pgBackRest configs in one place. - case "SecretList": - // Repository host now uses mTLS for encryption, authentication, and authorization. - // Secrets for SSHD are no longer managed here. - // TODO(tjmoore4): Consider adding all pgBackRest secrets to RepoResources to - // observe all pgBackRest secrets in one place. default: return fmt.Errorf("unexpected kind %q", kind) } @@ -471,8 +549,9 @@ func (r *Reconciler) setScheduledJobStatus(ctx context.Context, for _, job := range jobList.Items { // we only care about the scheduled backup Jobs created by the // associated CronJobs - sbs := v1beta1.PGBackRestScheduledBackupStatus{} if job.GetLabels()[naming.LabelPGBackRestCronJob] != "" { + sbs := v1beta1.PGBackRestScheduledBackupStatus{} + if len(job.OwnerReferences) > 0 { sbs.CronJobName = job.OwnerReferences[0].Name } @@ -498,7 +577,7 @@ func (r *Reconciler) setScheduledJobStatus(ctx context.Context, // generateRepoHostIntent creates and populates StatefulSet with the PostgresCluster's full intent // as needed to create and reconcile a pgBackRest dedicated repository host within the kubernetes // cluster. -func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresCluster, +func (r *Reconciler) generateRepoHostIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repoHostName string, repoResources *RepoResources, observedInstances *observedInstances, ) (*appsv1.StatefulSet, error) { @@ -542,16 +621,12 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu repo.Spec.Template.Spec.Affinity = repoHost.Affinity repo.Spec.Template.Spec.Tolerations = repoHost.Tolerations repo.Spec.Template.Spec.TopologySpreadConstraints = repoHost.TopologySpreadConstraints - if repoHost.PriorityClassName != nil { - repo.Spec.Template.Spec.PriorityClassName = *repoHost.PriorityClassName - } + repo.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(repoHost.PriorityClassName) } // if default pod scheduling is not explicitly disabled, add the default // pod topology spread constraints - if postgresCluster.Spec.DisableDefaultPodScheduling == nil || - (postgresCluster.Spec.DisableDefaultPodScheduling != nil && - !*postgresCluster.Spec.DisableDefaultPodScheduling) { + if !initialize.FromPointer(postgresCluster.Spec.DisableDefaultPodScheduling) { repo.Spec.Template.Spec.TopologySpreadConstraints = append( repo.Spec.Template.Spec.TopologySpreadConstraints, defaultTopologySpreadConstraints( @@ -613,16 +688,18 @@ func (r *Reconciler) generateRepoHostIntent(postgresCluster *v1beta1.PostgresClu repo.Spec.Template.Spec.SecurityContext = postgres.PodSecurityContext(postgresCluster) - pgbackrest.AddServerToRepoPod(postgresCluster, &repo.Spec.Template.Spec) + pgbackrest.AddServerToRepoPod(ctx, postgresCluster, &repo.Spec.Template.Spec) - // add the init container to make the pgBackRest repo volume log directory - pgbackrest.MakePGBackrestLogDir(&repo.Spec.Template, postgresCluster) + if pgbackrest.RepoHostVolumeDefined(postgresCluster) { + // add the init container to make the pgBackRest repo 
volume log directory + pgbackrest.MakePGBackrestLogDir(&repo.Spec.Template, postgresCluster) - // add pgBackRest repo volumes to pod - if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, - getRepoPVCNames(postgresCluster, repoResources.pvcs), - naming.PGBackRestRepoContainerName); err != nil { - return nil, errors.WithStack(err) + // add pgBackRest repo volumes to pod + if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, + getRepoPVCNames(postgresCluster, repoResources.pvcs), + naming.PGBackRestRepoContainerName); err != nil { + return nil, errors.WithStack(err) + } } // add configs to pod pgbackrest.AddConfigToRepoPod(postgresCluster, &repo.Spec.Template.Spec) @@ -692,20 +769,20 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC } // generateBackupJobSpecIntent generates a JobSpec for a pgBackRest backup job -func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, +func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repo v1beta1.PGBackRestRepo, serviceAccountName string, - labels, annotations map[string]string, opts ...string) (*batchv1.JobSpec, error) { - - selector, containerName, err := getPGBackRestExecSelector(postgresCluster, repo) - if err != nil { - return nil, errors.WithStack(err) - } + labels, annotations map[string]string, opts ...string) *batchv1.JobSpec { repoIndex := regexRepoIndex.FindString(repo.Name) cmdOpts := []string{ "--stanza=" + pgbackrest.DefaultStanzaName, "--repo=" + repoIndex, } + // If VolumeSnapshots are enabled, use archive-copy and archive-check options + if postgresCluster.Spec.Backups.Snapshots != nil && feature.Enabled(ctx, feature.VolumeSnapshots) { + cmdOpts = append(cmdOpts, "--archive-copy=y", "--archive-check=y") + } + cmdOpts = append(cmdOpts, opts...) container := corev1.Container{ @@ -714,9 +791,9 @@ func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, {Name: "COMMAND", Value: "backup"}, {Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")}, {Name: "COMPARE_HASH", Value: "true"}, - {Name: "CONTAINER", Value: containerName}, + {Name: "CONTAINER", Value: naming.PGBackRestRepoContainerName}, {Name: "NAMESPACE", Value: postgresCluster.GetNamespace()}, - {Name: "SELECTOR", Value: selector.String()}, + {Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()}, }, Image: config.PGBackRestContainerImage(postgresCluster), ImagePullPolicy: postgresCluster.Spec.ImagePullPolicy, @@ -756,12 +833,10 @@ func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, // set the priority class name, tolerations, and affinity, if they exist if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil { - if postgresCluster.Spec.Backups.PGBackRest.Jobs.PriorityClassName != nil { - jobSpec.Template.Spec.PriorityClassName = - *postgresCluster.Spec.Backups.PGBackRest.Jobs.PriorityClassName - } jobSpec.Template.Spec.Tolerations = postgresCluster.Spec.Backups.PGBackRest.Jobs.Tolerations jobSpec.Template.Spec.Affinity = postgresCluster.Spec.Backups.PGBackRest.Jobs.Affinity + jobSpec.Template.Spec.PriorityClassName = + initialize.FromPointer(postgresCluster.Spec.Backups.PGBackRest.Jobs.PriorityClassName) } // Set the image pull secrets, if any exist. 
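For a concrete sense of the options assembled at the top of this function: with snapshots configured, the VolumeSnapshots feature gate enabled, and a caller-supplied --type=full, COMMAND_OPTS renders roughly as below. Treat the stanza name "db" as an assumption about pgbackrest.DefaultStanzaName; the whole rendering is illustrative.

cmdOpts := []string{"--stanza=db", "--repo=1"}
cmdOpts = append(cmdOpts, "--archive-copy=y", "--archive-check=y") // snapshots + feature gate on
cmdOpts = append(cmdOpts, "--type=full")                           // caller-supplied backup type
// strings.Join(cmdOpts, " ") == "--stanza=db --repo=1 --archive-copy=y --archive-check=y --type=full"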
@@ -771,13 +846,9 @@ func generateBackupJobSpecIntent(postgresCluster *v1beta1.PostgresCluster, jobSpec.Template.Spec.ImagePullSecrets = postgresCluster.Spec.ImagePullSecrets // add pgBackRest configs to template - if containerName == naming.PGBackRestRepoContainerName { - pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) - } else { - pgbackrest.AddConfigToInstancePod(postgresCluster, &jobSpec.Template.Spec) - } + pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) - return jobSpec, nil + return jobSpec } // +kubebuilder:rbac:groups="",resources="configmaps",verbs={delete,list} @@ -1257,9 +1328,7 @@ func (r *Reconciler) generateRestoreJobIntent(cluster *v1beta1.PostgresCluster, job.Spec.Template.Spec.SecurityContext = postgres.PodSecurityContext(cluster) // set the priority class name, if it exists - if dataSource.PriorityClassName != nil { - job.Spec.Template.Spec.PriorityClassName = *dataSource.PriorityClassName - } + job.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(dataSource.PriorityClassName) job.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) if err := errors.WithStack(r.setControllerReference(cluster, job)); err != nil { @@ -1277,13 +1346,15 @@ func (r *Reconciler) generateRestoreJobIntent(cluster *v1beta1.PostgresCluster, func (r *Reconciler) reconcilePGBackRest(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, instances *observedInstances, - rootCA *pki.RootCertificateAuthority) (reconcile.Result, error) { + rootCA *pki.RootCertificateAuthority, + backupsSpecFound bool, +) (reconcile.Result, error) { // add some additional context about what component is being reconciled log := logging.FromContext(ctx).WithValues("reconciler", "pgBackRest") - // if nil, create the pgBackRest status that will be updated when reconciling various - // pgBackRest resources + // if nil, create the pgBackRest status that will be updated when + // reconciling various pgBackRest resources if postgresCluster.Status.PGBackRest == nil { postgresCluster.Status.PGBackRest = &v1beta1.PGBackRestStatus{} } @@ -1294,32 +1365,33 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // Get all currently owned pgBackRest resources in the environment as needed for // reconciliation. This includes deleting resources that should no longer exist per the // current spec (e.g. if repos, repo hosts, etc. have been removed). 
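Several hunks above replace nil-guarded dereferences with initialize.FromPointer. That helper is not shown in this diff; the generic sketch below only illustrates the assumed behavior that lets the call sites shrink (the real implementation lives in internal/initialize and may differ).

// FromPointer returns *p, or the zero value of T when p is nil (assumed behavior).
func FromPointer[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

With that behavior, initialize.FromPointer(repoHost.PriorityClassName) assigns the empty string when no priority class is set, which is equivalent to the removed nil check for a freshly built intent object.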
- repoResources, err := r.getPGBackRestResources(ctx, postgresCluster) + repoResources, err := r.getPGBackRestResources(ctx, postgresCluster, backupsSpecFound) if err != nil { // exit early if can't get and clean existing resources as needed to reconcile return reconcile.Result{}, errors.WithStack(err) } + // At this point, reconciliation is allowed, so if no backups spec is found + // clear the status and exit + if !backupsSpecFound { + postgresCluster.Status.PGBackRest = &v1beta1.PGBackRestStatus{} + return result, nil + } + var repoHost *appsv1.StatefulSet var repoHostName string - dedicatedEnabled := pgbackrest.DedicatedRepoHostEnabled(postgresCluster) - if dedicatedEnabled { - // reconcile the pgbackrest repository host - repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources, instances) - if err != nil { - log.Error(err, "unable to reconcile pgBackRest repo host") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) - return result, nil - } - repoHostName = repoHost.GetName() - } else { - // remove the dedicated repo host status if a dedicated host is not enabled - meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, ConditionRepoHostReady) + // reconcile the pgbackrest repository host + repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources, instances) + if err != nil { + log.Error(err, "unable to reconcile pgBackRest repo host") + result.Requeue = true + return result, nil } + repoHostName = repoHost.GetName() if err := r.reconcilePGBackRestSecret(ctx, postgresCluster, repoHost, rootCA); err != nil { log.Error(err, "unable to reconcile pgBackRest secret") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } // calculate hashes for the external repository configurations in the spec (e.g. for Azure, @@ -1328,7 +1400,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, configHashes, configHash, err := pgbackrest.CalculateConfigHashes(postgresCluster) if err != nil { log.Error(err, "unable to calculate config hashes") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true return result, nil } @@ -1336,7 +1408,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, replicaCreateRepo, err := r.reconcileRepos(ctx, postgresCluster, configHashes, repoResources) if err != nil { log.Error(err, "unable to reconcile pgBackRest repo host") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true return result, nil } @@ -1351,14 +1423,14 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, configHash, naming.ClusterPodService(postgresCluster).Name, postgresCluster.GetNamespace(), instanceNames); err != nil { log.Error(err, "unable to reconcile pgBackRest configuration") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } // reconcile the RBAC required to run pgBackRest Jobs (e.g. for backups) sa, err := r.reconcilePGBackRestRBAC(ctx, postgresCluster) if err != nil { log.Error(err, "unable to create replica creation backup") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true return result, nil } @@ -1377,14 +1449,14 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // custom configuration and ensure stanzas are still created). 
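The backupsSpecFound flag consumed at the top of reconcilePGBackRest is presumably produced by the BackupsEnabled helper added near the end of this file. The wiring below is an assumption about the calling cluster controller (that call site is not part of this diff), shown only to make the data flow explicit.

// Assumed call order in the cluster reconciler (not shown in this diff):
backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster)
if err == nil && backupsReconciliationAllowed {
	result, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, backupsSpecFound)
}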
if err != nil { log.Error(err, "unable to create stanza") - result = updateReconcileResult(result, reconcile.Result{RequeueAfter: 10 * time.Second}) + result.RequeueAfter = 10 * time.Second } // If a config hash mismatch, then log an info message and requeue to try again. Add some time // to the requeue to give the pgBackRest configuration changes a chance to propagate to the // container. if configHashMismatch { log.Info("pgBackRest config hash mismatch detected, requeuing to reattempt stanza create") - result = updateReconcileResult(result, reconcile.Result{RequeueAfter: 10 * time.Second}) + result.RequeueAfter = 10 * time.Second } // reconcile the pgBackRest backup CronJobs requeue := r.reconcileScheduledBackups(ctx, postgresCluster, sa, repoResources.cronjobs) @@ -1395,7 +1467,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // A potential option to handle this proactively would be to use a webhook: // https://book.kubebuilder.io/cronjob-tutorial/webhook-implementation.html if requeue { - result = updateReconcileResult(result, reconcile.Result{RequeueAfter: 10 * time.Second}) + result.RequeueAfter = 10 * time.Second } // Reconcile the initial backup that is needed to enable replica creation using pgBackRest. @@ -1403,7 +1475,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, if err := r.reconcileReplicaCreateBackup(ctx, postgresCluster, instances, repoResources.replicaCreateBackupJobs, sa, configHash, replicaCreateRepo); err != nil { log.Error(err, "unable to reconcile replica creation backup") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } // Reconcile a manual backup as defined in the spec, and triggered by the end-user via @@ -1411,7 +1483,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, if err := r.reconcileManualBackup(ctx, postgresCluster, repoResources.manualBackupJobs, sa, instances); err != nil { log.Error(err, "unable to reconcile manual backup") - result = updateReconcileResult(result, reconcile.Result{Requeue: true}) + result.Requeue = true } return result, nil @@ -1426,7 +1498,9 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, dataSource *v1beta1.PostgresClusterDataSource, configHash string, clusterVolumes []corev1.PersistentVolumeClaim, - rootCA *pki.RootCertificateAuthority) error { + rootCA *pki.RootCertificateAuthority, + backupsSpecFound bool, +) error { // grab cluster, namespaces and repo name information from the data source sourceClusterName := dataSource.ClusterName @@ -1508,7 +1582,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, // Note that function reconcilePGBackRest only uses forCluster in observedInstances. 
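Throughout reconcilePGBackRest the updateReconcileResult helper is replaced by direct writes to the accumulated result. For reference, the controller-runtime semantics relied on above: Requeue retries on the next pass via the controller's rate limiter, while RequeueAfter schedules the retry after the given delay. A condensed sketch of the accumulation pattern, with the condition names standing in for the checks above:

result := reconcile.Result{}
if secretReconcileFailed { // placeholder condition
	result.Requeue = true // retry promptly, rate limited
}
if configHashMismatch { // placeholder condition
	result.RequeueAfter = 10 * time.Second // give config changes time to propagate
}
return result, nil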
result, err := r.reconcilePGBackRest(ctx, cluster, &observedInstances{ forCluster: []*Instance{instance}, - }, rootCA) + }, rootCA, backupsSpecFound) if err != nil || result != (reconcile.Result{}) { return fmt.Errorf("unable to reconcile pgBackRest as needed to initialize "+ "PostgreSQL data for the cluster: %w", err) @@ -1554,7 +1628,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, Namespace: cluster.GetNamespace(), }} // Reconcile the PGDATA and WAL volumes for the restore - pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes) + pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes, sourceCluster) if err != nil { return errors.WithStack(err) } @@ -1568,6 +1642,9 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, return errors.WithStack(err) } + // TODO(snapshots): If pgdata is being sourced by a VolumeSnapshot then don't perform a typical restore job; + // we only want to replay the WAL. + // reconcile the pgBackRest restore Job to populate the cluster's data directory if err := r.reconcileRestoreJob(ctx, cluster, sourceCluster, pgdata, pgwal, pgtablespaces, dataSource, instanceName, instanceSetName, configHash, pgbackrest.DefaultStanzaName); err != nil { @@ -1649,7 +1726,7 @@ func (r *Reconciler) reconcileCloudBasedDataSource(ctx context.Context, Namespace: cluster.GetNamespace(), }} // Reconcile the PGDATA and WAL volumes for the restore - pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes) + pgdata, err := r.reconcilePostgresDataVolume(ctx, cluster, instanceSet, fakeSTS, clusterVolumes, nil) if err != nil { return errors.WithStack(err) } @@ -1914,8 +1991,6 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, repoHostName, configHash, serviceName, serviceNamespace string, instanceNames []string) error { - log := logging.FromContext(ctx).WithValues("reconcileResource", "repoConfig") - backrestConfig := pgbackrest.CreatePGBackRestConfigMapIntent(postgresCluster, repoHostName, configHash, serviceName, serviceNamespace, instanceNames) if err := controllerutil.SetControllerReference(postgresCluster, backrestConfig, @@ -1926,12 +2001,6 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, return errors.WithStack(err) } - repoHostConfigured := pgbackrest.DedicatedRepoHostEnabled(postgresCluster) - if !repoHostConfigured { - log.V(1).Info("skipping SSH reconciliation, no repo hosts configured") - return nil - } - return nil } @@ -2218,20 +2287,18 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, return nil } - // determine if the dedicated repository host is ready (if enabled) using the repo host ready + // determine if the dedicated repository host is ready using the repo host ready // condition, and return if not - if pgbackrest.DedicatedRepoHostEnabled(postgresCluster) { - condition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) - if condition == nil || condition.Status != metav1.ConditionTrue { - return nil - } + repoCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) + if repoCondition == nil || repoCondition.Status != metav1.ConditionTrue { + return nil } // Determine if the replica create backup is complete and return if not. This allows for proper // orchestration of backup Jobs since only one backup can be run at a time. 
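Taken together, the condition checks in this function gate manual backups behind both a ready dedicated repository host and a completed replica-create backup. Condensed into one loop, a sketch equivalent to the two checks above and below:

for _, condType := range []string{ConditionRepoHostReady, ConditionReplicaCreate} {
	cond := meta.FindStatusCondition(postgresCluster.Status.Conditions, condType)
	if cond == nil || cond.Status != metav1.ConditionTrue {
		return nil // wait for a later reconcile
	}
}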
- condition := meta.FindStatusCondition(postgresCluster.Status.Conditions, + backupCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionReplicaCreate) - if condition == nil || condition.Status != metav1.ConditionTrue { + if backupCondition == nil || backupCondition.Status != metav1.ConditionTrue { return nil } @@ -2306,11 +2373,9 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, backupJob.ObjectMeta.Labels = labels backupJob.ObjectMeta.Annotations = annotations - spec, err := generateBackupJobSpecIntent(postgresCluster, repo, + spec := generateBackupJobSpecIntent(ctx, postgresCluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) - if err != nil { - return errors.WithStack(err) - } + backupJob.Spec = *spec // set gvk and ownership refs @@ -2398,13 +2463,6 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, replicaRepoReady = (condition.Status == metav1.ConditionTrue) } - // get pod name and container name as needed to exec into the proper pod and create - // the pgBackRest backup - _, containerName, err := getPGBackRestExecSelector(postgresCluster, replicaCreateRepo) - if err != nil { - return errors.WithStack(err) - } - // determine if the dedicated repository host is ready using the repo host ready status var dedicatedRepoReady bool condition = meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) @@ -2431,14 +2489,10 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // - The job has failed. The Job will be deleted and recreated to try again. // - The replica creation repo has changed since the Job was created. Delete and recreate // with the Job with the proper repo configured. - // - The "config" annotation has changed, indicating there is a new primary. Delete and - // recreate the Job with the proper config mounted (applicable when a dedicated repo - // host is not enabled). // - The "config hash" annotation has changed, indicating a configuration change has been // made in the spec (specifically a change to the config for an external repo). Delete // and recreate the Job with proper hash per the current config. 
if failed || replicaCreateRepoChanged || - (job.GetAnnotations()[naming.PGBackRestCurrentConfig] != containerName) || (job.GetAnnotations()[naming.PGBackRestConfigHash] != configHash) { if err := r.Client.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { @@ -2454,10 +2508,9 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, } } - dedicatedEnabled := pgbackrest.DedicatedRepoHostEnabled(postgresCluster) - // return if no job has been created and the replica repo or the dedicated repo host is not - // ready - if job == nil && ((dedicatedEnabled && !dedicatedRepoReady) || !replicaRepoReady) { + // return if no job has been created and the replica repo or the dedicated + // repo host is not ready + if job == nil && (!dedicatedRepoReady || !replicaRepoReady) { return nil } @@ -2476,17 +2529,14 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, annotations = naming.Merge(postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), postgresCluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil(), map[string]string{ - naming.PGBackRestCurrentConfig: containerName, - naming.PGBackRestConfigHash: configHash, + naming.PGBackRestConfigHash: configHash, }) backupJob.ObjectMeta.Labels = labels backupJob.ObjectMeta.Annotations = annotations - spec, err := generateBackupJobSpecIntent(postgresCluster, replicaCreateRepo, + spec := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, serviceAccount.GetName(), labels, annotations) - if err != nil { - return errors.WithStack(err) - } + backupJob.Spec = *spec // set gvk and ownership refs @@ -2634,13 +2684,12 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, // create a pgBackRest executor and attempt stanza creation exec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(postgresCluster.GetNamespace(), writableInstanceName, + return r.PodExec(ctx, postgresCluster.GetNamespace(), writableInstanceName, naming.ContainerDatabase, stdin, stdout, stderr, command...) } // Always attempt to create pgBackRest stanza first - configHashMismatch, err := pgbackrest.Executor(exec).StanzaCreateOrUpgrade(ctx, configHash, - false) + configHashMismatch, err := pgbackrest.Executor(exec).StanzaCreateOrUpgrade(ctx, configHash, postgresCluster) if err != nil { // record and log any errors resulting from running the stanza-create command r.Recorder.Event(postgresCluster, corev1.EventTypeWarning, EventUnableToCreateStanzas, @@ -2668,29 +2717,8 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, return false, nil } -// getPGBackRestExecSelector returns a selector and container name that allows the proper -// Pod (along with a specific container within it) to be found within the Kubernetes -// cluster as needed to exec into the container and run a pgBackRest command. 
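With getPGBackRestExecSelector removed, backup Jobs always exec into the dedicated repository host. For a cluster named hippo, the SELECTOR environment value set earlier therefore matches the labels that the removed TestGetPGBackRestExecSelector (later in this diff) expected for the dedicated case:

selector := naming.PGBackRestDedicatedSelector("hippo").String()
// selector == "postgres-operator.crunchydata.com/cluster=hippo," +
//	"postgres-operator.crunchydata.com/pgbackrest=," +
//	"postgres-operator.crunchydata.com/pgbackrest-dedicated="
// and the exec target container is naming.PGBackRestRepoContainerName ("pgbackrest").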
-func getPGBackRestExecSelector(postgresCluster *v1beta1.PostgresCluster, - repo v1beta1.PGBackRestRepo) (labels.Selector, string, error) { - - var err error - var podSelector labels.Selector - var containerName string - - if repo.Volume != nil { - podSelector = naming.PGBackRestDedicatedSelector(postgresCluster.GetName()) - containerName = naming.PGBackRestRepoContainerName - } else { - podSelector, err = naming.AsSelector(naming.ClusterPrimary(postgresCluster.GetName())) - containerName = naming.ContainerDatabase - } - - return podSelector, containerName, err -} - -// getRepoHostStatus is responsible for returning the pgBackRest status for the provided pgBackRest -// repository host +// getRepoHostStatus is responsible for returning the pgBackRest status for the +// provided pgBackRest repository host func getRepoHostStatus(repoHost *appsv1.StatefulSet) *v1beta1.RepoHostStatus { repoHostStatus := &v1beta1.RepoHostStatus{} @@ -2869,8 +2897,7 @@ func (r *Reconciler) reconcilePGBackRestCronJob( labels := naming.Merge( cluster.Spec.Metadata.GetLabelsOrNil(), cluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), - naming.PGBackRestCronJobLabels(cluster.Name, repo.Name, backupType), - ) + naming.PGBackRestCronJobLabels(cluster.Name, repo.Name, backupType)) objectmeta := naming.PGBackRestCronJob(cluster, backupType, repo.Name) // Look for an existing CronJob by the associated Labels. If one exists, @@ -2934,11 +2961,8 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set backup type (i.e. "full", "diff", "incr") backupOpts := []string{"--type=" + backupType} - jobSpec, err := generateBackupJobSpecIntent(cluster, repo, + jobSpec := generateBackupJobSpecIntent(ctx, cluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) - if err != nil { - return errors.WithStack(err) - } // Suspend cronjobs when shutdown or read-only. Any jobs that have already // started will continue. @@ -2971,7 +2995,7 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set metadata pgBackRestCronJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("CronJob")) - err = errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) + err := errors.WithStack(r.setControllerReference(cluster, pgBackRestCronJob)) if err == nil { err = r.apply(ctx, pgBackRestCronJob) @@ -2984,3 +3008,94 @@ func (r *Reconciler) reconcilePGBackRestCronJob( } return err } + +// BackupsEnabled checks the state of the backups (i.e., if backups are in the spec, +// if a repo-host StatefulSet exists, if the annotation permitting backup deletion exists) +// and determines whether reconciliation is allowed. +// Reconciliation of backup-related Kubernetes objects is paused if +// - a user created a cluster with backups; +// - the cluster is updated to remove backups; +// - the annotation authorizing that removal is missing. +// +// This function also returns whether the spec has a defined backups or not. +func (r *Reconciler) BackupsEnabled( + ctx context.Context, + postgresCluster *v1beta1.PostgresCluster, +) ( + backupsSpecFound bool, + backupsReconciliationAllowed bool, + err error, +) { + specFound, stsNotFound, annotationFound, err := r.ObserveBackupUniverse(ctx, postgresCluster) + + switch { + case err != nil: + case specFound: + backupsSpecFound = true + backupsReconciliationAllowed = true + case annotationFound || stsNotFound: + backupsReconciliationAllowed = true + case !annotationFound && !stsNotFound: + // Destroying backups is a two key operation: + // 1. 
You must remove the backups section of the spec. + // 2. You must apply an annotation to the cluster. + // The existence of a StatefulSet without the backups spec is + // evidence of key 1 being turned without key 2 being turned + // -- block reconciliation until the annotation is added. + backupsReconciliationAllowed = false + default: + backupsReconciliationAllowed = false + } + return backupsSpecFound, backupsReconciliationAllowed, err +} + +// ObserveBackupUniverse returns +// - whether the spec has backups defined; +// - whether the repo-host statefulset exists; +// - whether the cluster has the annotation authorizing backup removal. +func (r *Reconciler) ObserveBackupUniverse(ctx context.Context, + postgresCluster *v1beta1.PostgresCluster, +) ( + backupsSpecFound bool, + repoHostStatefulSetNotFound bool, + backupsRemovalAnnotationFound bool, + err error, +) { + + // Does the cluster have a blank Backups section + backupsSpecFound = !reflect.DeepEqual(postgresCluster.Spec.Backups, v1beta1.Backups{PGBackRest: v1beta1.PGBackRestArchive{}}) + + // Does the repo-host StatefulSet exist? + name := fmt.Sprintf("%s-%s", postgresCluster.GetName(), "repo-host") + existing := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: postgresCluster.Namespace, + Name: name, + }, + } + err = errors.WithStack( + r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + repoHostStatefulSetNotFound = apierrors.IsNotFound(err) + + // If we have an error that is not related to a missing repo-host StatefulSet, + // we return an error and expect the calling function to correctly stop processing. + if err != nil && !repoHostStatefulSetNotFound { + return true, false, false, err + } + + backupsRemovalAnnotationFound = authorizeBackupRemovalAnnotationPresent(postgresCluster) + + // If we have reached this point, the err is either nil or an IsNotFound error + // which we do not care about; hence, pass nil rather than the err + return backupsSpecFound, repoHostStatefulSetNotFound, backupsRemovalAnnotationFound, nil +} + +func authorizeBackupRemovalAnnotationPresent(postgresCluster *v1beta1.PostgresCluster) bool { + annotations := postgresCluster.GetAnnotations() + for annotation := range annotations { + if annotation == naming.AuthorizeBackupRemovalAnnotation { + return annotations[naming.AuthorizeBackupRemovalAnnotation] == "true" + } + } + return false +} diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index 999ec535fc..8e34dabb5e 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -52,6 +41,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/pki" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -78,7 +68,7 @@ func fakePostgresCluster(clusterName, namespace, clusterUID string, Name: "instance1", DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -129,7 +119,7 @@ func fakePostgresCluster(clusterName, namespace, clusterUID string, Volume: &v1beta1.RepoPVC{ VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -197,137 +187,137 @@ func TestReconcilePGBackRest(t *testing.T) { }) t.Cleanup(func() { teardownManager(cancel, t) }) - clusterName := "hippocluster" - clusterUID := "hippouid" + t.Run("run reconcile with backups defined", func(t *testing.T) { + clusterName := "hippocluster" + clusterUID := "hippouid" - ns := setupNamespace(t, tClient) - - // create a PostgresCluster to test with - postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + ns := setupNamespace(t, tClient) + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) - // create a service account to test with - serviceAccount, err := r.reconcilePGBackRestRBAC(ctx, postgresCluster) - assert.NilError(t, err) - assert.Assert(t, serviceAccount != nil) + // create a service account to test with + serviceAccount, err := r.reconcilePGBackRestRBAC(ctx, postgresCluster) + assert.NilError(t, err) + assert.Assert(t, serviceAccount != nil) - // create the 'observed' instances and set the leader - instances := &observedInstances{ - forCluster: []*Instance{{Name: "instance1", - Pods: []*corev1.Pod{{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, - }, - Spec: corev1.PodSpec{}, - }}, - }, {Name: "instance2"}, {Name: "instance3"}}, - } + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } - // set status - postgresCluster.Status = v1beta1.PostgresClusterStatus{ - Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, - PGBackRest: &v1beta1.PGBackRestStatus{ - RepoHost: &v1beta1.RepoHostStatus{Ready: true}, - Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, - } + // set status + postgresCluster.Status = v1beta1.PostgresClusterStatus{ + Patroni: 
v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + PGBackRest: &v1beta1.PGBackRestStatus{ + RepoHost: &v1beta1.RepoHostStatus{Ready: true}, + Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, + } - // set conditions - clusterConditions := map[string]metav1.ConditionStatus{ - ConditionRepoHostReady: metav1.ConditionTrue, - ConditionReplicaCreate: metav1.ConditionTrue, - } - for condition, status := range clusterConditions { - meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ - Type: condition, Reason: "testing", Status: status}) - } + // set conditions + clusterConditions := map[string]metav1.ConditionStatus{ + ConditionRepoHostReady: metav1.ConditionTrue, + ConditionReplicaCreate: metav1.ConditionTrue, + } + for condition, status := range clusterConditions { + meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ + Type: condition, Reason: "testing", Status: status}) + } - rootCA, err := pki.NewRootCertificateAuthority() - assert.NilError(t, err) + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) - result, err := r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA) - if err != nil || result != (reconcile.Result{}) { - t.Errorf("unable to reconcile pgBackRest: %v", err) - } + result, err := r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + if err != nil || result != (reconcile.Result{}) { + t.Errorf("unable to reconcile pgBackRest: %v", err) + } - // repo is the first defined repo - repo := postgresCluster.Spec.Backups.PGBackRest.Repos[0] + // repo is the first defined repo + repo := postgresCluster.Spec.Backups.PGBackRest.Repos[0] - // test that the repo was created properly - t.Run("verify pgbackrest dedicated repo StatefulSet", func(t *testing.T) { + // test that the repo was created properly + t.Run("verify pgbackrest dedicated repo StatefulSet", func(t *testing.T) { - // get the pgBackRest repo sts using the labels we expect it to have - dedicatedRepos := &appsv1.StatefulSetList{} - if err := tClient.List(ctx, dedicatedRepos, client.InNamespace(ns.Name), - client.MatchingLabels{ - naming.LabelCluster: clusterName, - naming.LabelPGBackRest: "", - naming.LabelPGBackRestDedicated: "", - }); err != nil { - t.Fatal(err) - } + // get the pgBackRest repo sts using the labels we expect it to have + dedicatedRepos := &appsv1.StatefulSetList{} + if err := tClient.List(ctx, dedicatedRepos, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestDedicated: "", + }); err != nil { + t.Fatal(err) + } - repo := appsv1.StatefulSet{} - // verify that we found a repo sts as expected - if len(dedicatedRepos.Items) == 0 { - t.Fatal("Did not find a dedicated repo sts") - } else if len(dedicatedRepos.Items) > 1 { - t.Fatal("Too many dedicated repo sts's found") - } else { - repo = dedicatedRepos.Items[0] - } + repo := appsv1.StatefulSet{} + // verify that we found a repo sts as expected + if len(dedicatedRepos.Items) == 0 { + t.Fatal("Did not find a dedicated repo sts") + } else if len(dedicatedRepos.Items) > 1 { + t.Fatal("Too many dedicated repo sts's found") + } else { + repo = dedicatedRepos.Items[0] + } - // verify proper number of replicas - if *repo.Spec.Replicas != 1 { - t.Errorf("%v replicas found for dedicated repo sts, expected %v", - repo.Spec.Replicas, 1) - } + // verify proper number of replicas + if *repo.Spec.Replicas != 1 { + t.Errorf("%v replicas found for dedicated repo 
sts, expected %v", + repo.Spec.Replicas, 1) + } - // verify proper ownership - var foundOwnershipRef bool - for _, r := range repo.GetOwnerReferences() { - if r.Kind == "PostgresCluster" && r.Name == clusterName && - r.UID == types.UID(clusterUID) { + // verify proper ownership + var foundOwnershipRef bool + for _, r := range repo.GetOwnerReferences() { + if r.Kind == "PostgresCluster" && r.Name == clusterName && + r.UID == types.UID(clusterUID) { - foundOwnershipRef = true - break + foundOwnershipRef = true + break + } } - } - if !foundOwnershipRef { - t.Errorf("did not find expected ownership references") - } + if !foundOwnershipRef { + t.Errorf("did not find expected ownership references") + } - // verify proper matching labels - expectedLabels := map[string]string{ - naming.LabelCluster: clusterName, - naming.LabelPGBackRest: "", - naming.LabelPGBackRestDedicated: "", - } - expectedLabelsSelector, err := metav1.LabelSelectorAsSelector( - metav1.SetAsLabelSelector(expectedLabels)) - if err != nil { - t.Error(err) - } - if !expectedLabelsSelector.Matches(labels.Set(repo.GetLabels())) { - t.Errorf("dedicated repo host is missing an expected label: found=%v, expected=%v", - repo.GetLabels(), expectedLabels) - } + // verify proper matching labels + expectedLabels := map[string]string{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestDedicated: "", + } + expectedLabelsSelector, err := metav1.LabelSelectorAsSelector( + metav1.SetAsLabelSelector(expectedLabels)) + if err != nil { + t.Error(err) + } + if !expectedLabelsSelector.Matches(labels.Set(repo.GetLabels())) { + t.Errorf("dedicated repo host is missing an expected label: found=%v, expected=%v", + repo.GetLabels(), expectedLabels) + } - template := repo.Spec.Template.DeepCopy() + template := repo.Spec.Template.DeepCopy() - // Containers and Volumes should be populated. - assert.Assert(t, len(template.Spec.Containers) != 0) - assert.Assert(t, len(template.Spec.InitContainers) != 0) - assert.Assert(t, len(template.Spec.Volumes) != 0) + // Containers and Volumes should be populated. + assert.Assert(t, len(template.Spec.Containers) != 0) + assert.Assert(t, len(template.Spec.InitContainers) != 0) + assert.Assert(t, len(template.Spec.Volumes) != 0) - // Ignore Containers and Volumes in the comparison below. - template.Spec.Containers = nil - template.Spec.InitContainers = nil - template.Spec.Volumes = nil + // Ignore Containers and Volumes in the comparison below. + template.Spec.Containers = nil + template.Spec.InitContainers = nil + template.Spec.Volumes = nil - // TODO(tjmoore4): Add additional tests to test appending existing - // topology spread constraints and spec.disableDefaultPodScheduling being - // set to true (as done in instance StatefulSet tests). - assert.Assert(t, marshalMatches(template.Spec, ` + // TODO(tjmoore4): Add additional tests to test appending existing + // topology spread constraints and spec.disableDefaultPodScheduling being + // set to true (as done in instance StatefulSet tests). 
+ assert.Assert(t, cmp.MarshalMatches(template.Spec, ` affinity: {} automountServiceAccountToken: false containers: null @@ -381,230 +371,298 @@ topologySpreadConstraints: maxSkew: 1 topologyKey: topology.kubernetes.io/zone whenUnsatisfiable: ScheduleAnyway - `)) + `)) - // verify that the repohost container exists and contains the proper env vars - var repoHostContExists bool - for _, c := range repo.Spec.Template.Spec.Containers { - if c.Name == naming.PGBackRestRepoContainerName { - repoHostContExists = true + // verify that the repohost container exists and contains the proper env vars + var repoHostContExists bool + for _, c := range repo.Spec.Template.Spec.Containers { + if c.Name == naming.PGBackRestRepoContainerName { + repoHostContExists = true + } } - } - // now verify the proper env within the container - if !repoHostContExists { - t.Errorf("dedicated repo host is missing a container with name %s", - naming.PGBackRestRepoContainerName) - } - - repoHostStatus := postgresCluster.Status.PGBackRest.RepoHost - if repoHostStatus != nil { - if repoHostStatus.APIVersion != "apps/v1" || repoHostStatus.Kind != "StatefulSet" { - t.Errorf("invalid version/kind for dedicated repo host status") + // now verify the proper env within the container + if !repoHostContExists { + t.Errorf("dedicated repo host is missing a container with name %s", + naming.PGBackRestRepoContainerName) } - } else { - t.Errorf("dedicated repo host status is missing") - } - var foundConditionRepoHostsReady bool - for _, c := range postgresCluster.Status.Conditions { - if c.Type == "PGBackRestRepoHostReady" { - foundConditionRepoHostsReady = true - break + repoHostStatus := postgresCluster.Status.PGBackRest.RepoHost + if repoHostStatus != nil { + if repoHostStatus.APIVersion != "apps/v1" || repoHostStatus.Kind != "StatefulSet" { + t.Errorf("invalid version/kind for dedicated repo host status") + } + } else { + t.Errorf("dedicated repo host status is missing") } - } - if !foundConditionRepoHostsReady { - t.Errorf("status condition PGBackRestRepoHostsReady is missing") - } - events := &corev1.EventList{} - if err := wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.Name, - "involvedObject.uid": clusterUID, - "reason": "RepoHostCreated", - }); err != nil { - return false, err + var foundConditionRepoHostsReady bool + for _, c := range postgresCluster.Status.Conditions { + if c.Type == "PGBackRestRepoHostReady" { + foundConditionRepoHostsReady = true + break + } } - if len(events.Items) != 1 { - return false, nil + if !foundConditionRepoHostsReady { + t.Errorf("status condition PGBackRestRepoHostsReady is missing") } - return true, nil - }); err != nil { - t.Error(err) - } - }) - - t.Run("verify pgbackrest repo volumes", func(t *testing.T) { - // get the pgBackRest repo sts using the labels we expect it to have - repoVols := &corev1.PersistentVolumeClaimList{} - if err := tClient.List(ctx, repoVols, client.InNamespace(ns.Name), - client.MatchingLabels{ - naming.LabelCluster: clusterName, - naming.LabelPGBackRest: "", - naming.LabelPGBackRestRepoVolume: "", - }); err != nil { - t.Fatal(err) - } - assert.Assert(t, len(repoVols.Items) > 0) + assert.Check(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := 
tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.Name, + "involvedObject.uid": clusterUID, + "reason": "RepoHostCreated", + }) + return len(events.Items) == 1, err + })) + }) - for _, r := range postgresCluster.Spec.Backups.PGBackRest.Repos { - if r.Volume == nil { - continue + t.Run("verify pgbackrest repo volumes", func(t *testing.T) { + + // get the pgBackRest repo sts using the labels we expect it to have + repoVols := &corev1.PersistentVolumeClaimList{} + if err := tClient.List(ctx, repoVols, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestRepoVolume: "", + }); err != nil { + t.Fatal(err) } - var foundRepoVol bool - for _, v := range repoVols.Items { - if v.GetName() == - naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { - foundRepoVol = true - break + assert.Assert(t, len(repoVols.Items) > 0) + + for _, r := range postgresCluster.Spec.Backups.PGBackRest.Repos { + if r.Volume == nil { + continue } + var foundRepoVol bool + for _, v := range repoVols.Items { + if v.GetName() == + naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { + foundRepoVol = true + break + } + } + assert.Assert(t, foundRepoVol) } - assert.Assert(t, foundRepoVol) - } - }) + }) - t.Run("verify pgbackrest configuration", func(t *testing.T) { + t.Run("verify pgbackrest configuration", func(t *testing.T) { - config := &corev1.ConfigMap{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: naming.PGBackRestConfig(postgresCluster).Name, - Namespace: postgresCluster.GetNamespace(), - }, config); err != nil { - assert.NilError(t, err) - } - assert.Assert(t, len(config.Data) > 0) - - var instanceConfFound, dedicatedRepoConfFound bool - for k, v := range config.Data { - if v != "" { - if k == pgbackrest.CMInstanceKey { - instanceConfFound = true - } else if k == pgbackrest.CMRepoKey { - dedicatedRepoConfFound = true + config := &corev1.ConfigMap{} + if err := tClient.Get(ctx, types.NamespacedName{ + Name: naming.PGBackRestConfig(postgresCluster).Name, + Namespace: postgresCluster.GetNamespace(), + }, config); err != nil { + assert.NilError(t, err) + } + assert.Assert(t, len(config.Data) > 0) + + var instanceConfFound, dedicatedRepoConfFound bool + for k, v := range config.Data { + if v != "" { + if k == pgbackrest.CMInstanceKey { + instanceConfFound = true + } else if k == pgbackrest.CMRepoKey { + dedicatedRepoConfFound = true + } } } - } - assert.Check(t, instanceConfFound) - assert.Check(t, dedicatedRepoConfFound) - }) + assert.Check(t, instanceConfFound) + assert.Check(t, dedicatedRepoConfFound) + }) - t.Run("verify pgbackrest schedule cronjob", func(t *testing.T) { + t.Run("verify pgbackrest schedule cronjob", func(t *testing.T) { - // set status - postgresCluster.Status = v1beta1.PostgresClusterStatus{ - Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, - PGBackRest: &v1beta1.PGBackRestStatus{ - Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, - } + // set status + postgresCluster.Status = v1beta1.PostgresClusterStatus{ + Patroni: v1beta1.PatroniStatus{SystemIdentifier: "12345abcde"}, + PGBackRest: &v1beta1.PGBackRestStatus{ + Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: true}}}, + } - // set conditions - clusterConditions := map[string]metav1.ConditionStatus{ - ConditionRepoHostReady: metav1.ConditionTrue, - ConditionReplicaCreate: 
metav1.ConditionTrue, - } + // set conditions + clusterConditions := map[string]metav1.ConditionStatus{ + ConditionRepoHostReady: metav1.ConditionTrue, + ConditionReplicaCreate: metav1.ConditionTrue, + } - for condition, status := range clusterConditions { - meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ - Type: condition, Reason: "testing", Status: status}) - } + for condition, status := range clusterConditions { + meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ + Type: condition, Reason: "testing", Status: status}) + } - requeue := r.reconcileScheduledBackups(ctx, postgresCluster, serviceAccount, fakeObservedCronJobs()) - assert.Assert(t, !requeue) + requeue := r.reconcileScheduledBackups(ctx, postgresCluster, serviceAccount, fakeObservedCronJobs()) + assert.Assert(t, !requeue) - returnedCronJob := &batchv1.CronJob{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob); err != nil { - assert.NilError(t, err) - } + returnedCronJob := &batchv1.CronJob{} + if err := tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob); err != nil { + assert.NilError(t, err) + } - // check returned cronjob matches set spec - assert.Equal(t, returnedCronJob.Name, "hippocluster-repo1-full") - assert.Equal(t, returnedCronJob.Spec.Schedule, testCronSchedule) - assert.Equal(t, returnedCronJob.Spec.ConcurrencyPolicy, batchv1.ForbidConcurrent) - assert.Equal(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Name, - "pgbackrest") - assert.Assert(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext != &corev1.SecurityContext{}) + // check returned cronjob matches set spec + assert.Equal(t, returnedCronJob.Name, "hippocluster-repo1-full") + assert.Equal(t, returnedCronJob.Spec.Schedule, testCronSchedule) + assert.Equal(t, returnedCronJob.Spec.ConcurrencyPolicy, batchv1.ForbidConcurrent) + assert.Equal(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Name, + "pgbackrest") + assert.Assert(t, returnedCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].SecurityContext != &corev1.SecurityContext{}) - }) + }) - t.Run("verify pgbackrest schedule found", func(t *testing.T) { + t.Run("verify pgbackrest schedule found", func(t *testing.T) { - assert.Assert(t, backupScheduleFound(repo, "full")) + assert.Assert(t, backupScheduleFound(repo, "full")) - testrepo := v1beta1.PGBackRestRepo{ - Name: "repo1", - BackupSchedules: &v1beta1.PGBackRestBackupSchedules{ - Full: &testCronSchedule, - Differential: &testCronSchedule, - Incremental: &testCronSchedule, - }} + testrepo := v1beta1.PGBackRestRepo{ + Name: "repo1", + BackupSchedules: &v1beta1.PGBackRestBackupSchedules{ + Full: &testCronSchedule, + Differential: &testCronSchedule, + Incremental: &testCronSchedule, + }} - assert.Assert(t, backupScheduleFound(testrepo, "full")) - assert.Assert(t, backupScheduleFound(testrepo, "diff")) - assert.Assert(t, backupScheduleFound(testrepo, "incr")) + assert.Assert(t, backupScheduleFound(testrepo, "full")) + assert.Assert(t, backupScheduleFound(testrepo, "diff")) + assert.Assert(t, backupScheduleFound(testrepo, "incr")) - }) + }) + + t.Run("verify pgbackrest schedule not found", func(t *testing.T) { + + assert.Assert(t, !backupScheduleFound(repo, "notabackuptype")) + + noscheduletestrepo := 
v1beta1.PGBackRestRepo{Name: "repo1"} + assert.Assert(t, !backupScheduleFound(noscheduletestrepo, "full")) + + }) - t.Run("verify pgbackrest schedule not found", func(t *testing.T) { + t.Run("pgbackrest schedule suspended status", func(t *testing.T) { - assert.Assert(t, !backupScheduleFound(repo, "notabackuptype")) + returnedCronJob := &batchv1.CronJob{} + if err := tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob); err != nil { + assert.NilError(t, err) + } + + t.Run("pgbackrest schedule suspended false", func(t *testing.T) { + assert.Assert(t, !*returnedCronJob.Spec.Suspend) + }) - noscheduletestrepo := v1beta1.PGBackRestRepo{Name: "repo1"} - assert.Assert(t, !backupScheduleFound(noscheduletestrepo, "full")) + t.Run("shutdown", func(t *testing.T) { + *postgresCluster.Spec.Shutdown = true + postgresCluster.Spec.Standby = nil + requeue := r.reconcileScheduledBackups(ctx, + postgresCluster, serviceAccount, fakeObservedCronJobs()) + assert.Assert(t, !requeue) + + assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob)) + + assert.Assert(t, *returnedCronJob.Spec.Suspend) + }) + + t.Run("standby", func(t *testing.T) { + *postgresCluster.Spec.Shutdown = false + postgresCluster.Spec.Standby = &v1beta1.PostgresStandbySpec{ + Enabled: true, + } + + requeue := r.reconcileScheduledBackups(ctx, + postgresCluster, serviceAccount, fakeObservedCronJobs()) + assert.Assert(t, !requeue) + + assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name + "-repo1-full", + Namespace: postgresCluster.GetNamespace(), + }, returnedCronJob)) + + assert.Assert(t, *returnedCronJob.Spec.Suspend) + }) + }) }) - t.Run("pgbackrest schedule suspended status", func(t *testing.T) { + t.Run("run reconcile with backups not defined", func(t *testing.T) { + clusterName := "hippocluster2" + clusterUID := "hippouid2" + + ns := setupNamespace(t, tClient) + // create a PostgresCluster without backups to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + postgresCluster.Spec.Backups = v1beta1.Backups{} - returnedCronJob := &batchv1.CronJob{} - if err := tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob); err != nil { - assert.NilError(t, err) + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, } - t.Run("pgbackrest schedule suspended false", func(t *testing.T) { - assert.Assert(t, !*returnedCronJob.Spec.Suspend) - }) + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) - t.Run("shutdown", func(t *testing.T) { - *postgresCluster.Spec.Shutdown = true - postgresCluster.Spec.Standby = nil + result, err := r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, false) + if err != nil { + t.Errorf("unable to reconcile pgBackRest: %v", err) + } + assert.Equal(t, result, reconcile.Result{}) - requeue := r.reconcileScheduledBackups(ctx, - postgresCluster, serviceAccount, fakeObservedCronJobs()) - assert.Assert(t, !requeue) + 
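This test exercises reconcilePGBackRest with backupsSpecFound=false directly. On a live cluster, removing spec.backups additionally requires the authorization annotation checked by authorizeBackupRemovalAnnotationPresent (added at the end of pgbackrest.go). A sketch of what a user or test would set, written against the naming constant because the literal annotation key is not shown in this diff:

// Authorize backup removal on an existing cluster (sketch):
if postgresCluster.Annotations == nil {
	postgresCluster.Annotations = map[string]string{}
}
postgresCluster.Annotations[naming.AuthorizeBackupRemovalAnnotation] = "true"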
t.Run("verify pgbackrest dedicated repo StatefulSet", func(t *testing.T) { - assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob)) + // Verify the sts doesn't exist + dedicatedRepos := &appsv1.StatefulSetList{} + if err := tClient.List(ctx, dedicatedRepos, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestDedicated: "", + }); err != nil { + t.Fatal(err) + } - assert.Assert(t, *returnedCronJob.Spec.Suspend) + assert.Equal(t, len(dedicatedRepos.Items), 0) }) - t.Run("standby", func(t *testing.T) { - *postgresCluster.Spec.Shutdown = false - postgresCluster.Spec.Standby = &v1beta1.PostgresStandbySpec{ - Enabled: true, + t.Run("verify pgbackrest repo volumes", func(t *testing.T) { + + // get the pgBackRest repo sts using the labels we expect it to have + repoVols := &corev1.PersistentVolumeClaimList{} + if err := tClient.List(ctx, repoVols, client.InNamespace(ns.Name), + client.MatchingLabels{ + naming.LabelCluster: clusterName, + naming.LabelPGBackRest: "", + naming.LabelPGBackRestRepoVolume: "", + }); err != nil { + t.Fatal(err) } - requeue := r.reconcileScheduledBackups(ctx, - postgresCluster, serviceAccount, fakeObservedCronJobs()) - assert.Assert(t, !requeue) + assert.Equal(t, len(repoVols.Items), 0) + }) - assert.NilError(t, tClient.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name + "-repo1-full", - Namespace: postgresCluster.GetNamespace(), - }, returnedCronJob)) + t.Run("verify pgbackrest configuration", func(t *testing.T) { - assert.Assert(t, *returnedCronJob.Spec.Suspend) + config := &corev1.ConfigMap{} + err := tClient.Get(ctx, types.NamespacedName{ + Name: naming.PGBackRestConfig(postgresCluster).Name, + Namespace: postgresCluster.GetNamespace(), + }, config) + assert.Equal(t, apierrors.IsNotFound(err), true) }) }) } @@ -706,13 +764,13 @@ func TestReconcileStanzaCreate(t *testing.T) { }, }}) - stanzaCreateFail := func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + stanzaCreateFail := func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { return errors.New("fake stanza create failed") } - stanzaCreateSuccess := func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + stanzaCreateSuccess := func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { return nil } @@ -730,23 +788,18 @@ func TestReconcileStanzaCreate(t *testing.T) { assert.NilError(t, err) assert.Assert(t, !configHashMismatch) - events := &corev1.EventList{} - err = wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.Name, - "involvedObject.uid": clusterUID, - "reason": "StanzasCreated", - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, 
&client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.Name, + "involvedObject.uid": clusterUID, + "reason": "StanzasCreated", + }) + return len(events.Items) == 1, err + })) // status should indicate stanzas were created for _, r := range postgresCluster.Status.PGBackRest.Repos { @@ -774,23 +827,18 @@ func TestReconcileStanzaCreate(t *testing.T) { assert.Error(t, err, "fake stanza create failed: ") assert.Assert(t, !configHashMismatch) - events = &corev1.EventList{} - err = wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.Name, - "involvedObject.uid": clusterUID, - "reason": "UnableToCreateStanzas", - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.Name, + "involvedObject.uid": clusterUID, + "reason": "UnableToCreateStanzas", + }) + return len(events.Items) == 1, err + })) // status should indicate stanza were not created for _, r := range postgresCluster.Status.PGBackRest.Repos { @@ -798,52 +846,6 @@ func TestReconcileStanzaCreate(t *testing.T) { } } -func TestGetPGBackRestExecSelector(t *testing.T) { - - testCases := []struct { - cluster *v1beta1.PostgresCluster - repo v1beta1.PGBackRestRepo - desc string - expectedSelector string - expectedContainer string - }{{ - desc: "volume repo defined dedicated repo host enabled", - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, - }, - repo: v1beta1.PGBackRestRepo{ - Name: "repo1", - Volume: &v1beta1.RepoPVC{}, - }, - expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + - "postgres-operator.crunchydata.com/pgbackrest=," + - "postgres-operator.crunchydata.com/pgbackrest-dedicated=", - expectedContainer: "pgbackrest", - }, { - desc: "cloud repo defined no repo host enabled", - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "hippo"}, - }, - repo: v1beta1.PGBackRestRepo{ - Name: "repo1", - S3: &v1beta1.RepoS3{}, - }, - expectedSelector: "postgres-operator.crunchydata.com/cluster=hippo," + - "postgres-operator.crunchydata.com/instance," + - "postgres-operator.crunchydata.com/role=master", - expectedContainer: "database", - }} - - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - selector, container, err := getPGBackRestExecSelector(tc.cluster, tc.repo) - assert.NilError(t, err) - assert.Assert(t, selector.String() == tc.expectedSelector) - assert.Assert(t, container == tc.expectedContainer) - }) - } -} - func TestReconcileReplicaCreateBackup(t *testing.T) { // Garbage collector cleans up test resources before the test completes if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { @@ -928,17 +930,13 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { } assert.Assert(t, foundOwnershipRef) - var foundConfigAnnotation, foundHashAnnotation bool + var foundHashAnnotation bool // verify annotations for k, v := range 
backupJob.GetAnnotations() { - if k == naming.PGBackRestCurrentConfig && v == naming.PGBackRestRepoContainerName { - foundConfigAnnotation = true - } if k == naming.PGBackRestConfigHash && v == configHash { foundHashAnnotation = true } } - assert.Assert(t, foundConfigAnnotation) assert.Assert(t, foundHashAnnotation) // verify container & env vars @@ -1424,23 +1422,18 @@ func TestReconcileManualBackup(t *testing.T) { // if an event is expected, the check for it if tc.expectedEventReason != "" { - events := &corev1.EventList{} - err = wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.GetName(), - "involvedObject.uid": string(postgresCluster.GetUID()), - "reason": tc.expectedEventReason, - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": ns.GetName(), + "involvedObject.uid": string(postgresCluster.GetUID()), + "reason": tc.expectedEventReason, + }) + return len(events.Items) == 1, err + })) } return } @@ -1559,7 +1552,7 @@ func TestGetPGBackRestResources(t *testing.T) { }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -1598,7 +1591,7 @@ func TestGetPGBackRestResources(t *testing.T) { }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -1665,47 +1658,11 @@ func TestGetPGBackRestResources(t *testing.T) { jobCount: 0, pvcCount: 0, hostCount: 1, }, }, { - desc: "no dedicated repo host defined delete dedicated sts", - createResources: []client.Object{ - &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "delete-dedicated", - Namespace: namespace, - Labels: naming.PGBackRestDedicatedLabels(clusterName), - }, - Spec: appsv1.StatefulSetSpec{ - Selector: metav1.SetAsLabelSelector( - naming.PGBackRestDedicatedLabels(clusterName)), - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: naming.PGBackRestDedicatedLabels(clusterName), - }, - Spec: corev1.PodSpec{}, - }, - }, - }, - }, - cluster: &v1beta1.PostgresCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - Namespace: namespace, - UID: types.UID(clusterUID), - }, - Spec: v1beta1.PostgresClusterSpec{ - Backups: v1beta1.Backups{ - PGBackRest: v1beta1.PGBackRestArchive{}, - }, - }, - }, - result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, - }, - }, { - desc: "no repo host defined delete dedicated sts", + desc: "no dedicated repo host defined, dedicated sts not deleted", createResources: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: 
"delete-dedicated-no-repo-host", + Name: "keep-dedicated-two", Namespace: namespace, Labels: naming.PGBackRestDedicatedLabels(clusterName), }, @@ -1734,7 +1691,8 @@ func TestGetPGBackRestResources(t *testing.T) { }, }, result: testResult{ - jobCount: 0, pvcCount: 0, hostCount: 0, + // Host count is 2 due to previous repo host sts not being deleted. + jobCount: 0, pvcCount: 0, hostCount: 2, }, }} @@ -1747,7 +1705,7 @@ func TestGetPGBackRestResources(t *testing.T) { assert.NilError(t, err) assert.NilError(t, tClient.Create(ctx, resource)) - resources, err := r.getPGBackRestResources(ctx, tc.cluster) + resources, err := r.getPGBackRestResources(ctx, tc.cluster, true) assert.NilError(t, err) assert.Assert(t, tc.result.jobCount == len(resources.replicaCreateBackupJobs)) @@ -1984,7 +1942,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { pgclusterDataSource = tc.dataSource.PostgresCluster } err := r.reconcilePostgresClusterDataSource(ctx, cluster, pgclusterDataSource, - "testhash", nil, rootCA) + "testhash", nil, rootCA, true) assert.NilError(t, err) restoreConfig := &corev1.ConfigMap{} @@ -2035,23 +1993,17 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { if tc.result.invalidSourceCluster || tc.result.invalidSourceRepo || tc.result.invalidOptions { - events := &corev1.EventList{} - if err := wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": namespace, - "reason": "InvalidDataSource", - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }); err != nil { - t.Error(err) - } + assert.Check(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", + "involvedObject.name": clusterName, + "involvedObject.namespace": namespace, + "reason": "InvalidDataSource", + }) + return len(events.Items) == 1, err + })) } }) } @@ -2099,7 +2051,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { result: testResult{ configCount: 1, jobCount: 1, pvcCount: 1, expectedClusterCondition: nil, - conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", }, }, { desc: "global/configuration set", @@ -2116,7 +2068,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { result: testResult{ configCount: 1, jobCount: 1, pvcCount: 1, expectedClusterCondition: nil, - conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n log-path = /pgdata/pgbackrest/log\n repo1-path = elephant\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + conf: "|\n # Generated by postgres-operator. 
DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = elephant\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", }, }, { desc: "invalid option: stanza", @@ -2131,7 +2083,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { result: testResult{ configCount: 1, jobCount: 0, pvcCount: 1, expectedClusterCondition: nil, - conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", }, }, { desc: "cluster bootstrapped init condition missing", @@ -2150,7 +2102,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { Reason: "ClusterAlreadyBootstrapped", Message: "The cluster is already bootstrapped", }, - conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", + conf: "|\n # Generated by postgres-operator. DO NOT EDIT.\n # Your changes will not be saved.\n\n [global]\n archive-async = y\n log-path = /pgdata/pgbackrest/log\n repo1-path = /pgbackrest/repo1\n spool-path = /pgdata/pgbackrest-spool\n\n [db]\n pg1-path = /pgdata/pg13\n pg1-port = 5432\n pg1-socket-path = /tmp/postgres\n", }, }} @@ -2195,7 +2147,7 @@ func TestReconcileCloudBasedDataSource(t *testing.T) { assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %#v", err) } else { assert.NilError(t, err) - assert.Assert(t, marshalMatches(restoreConfig.Data["pgbackrest_instance.conf"], tc.result.conf)) + assert.Assert(t, cmp.MarshalMatches(restoreConfig.Data["pgbackrest_instance.conf"], tc.result.conf)) } restoreJobs := &batchv1.JobList{} @@ -2281,7 +2233,7 @@ func TestCopyConfigurationResources(t *testing.T) { Name: "instance1", DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -2333,7 +2285,7 @@ func TestCopyConfigurationResources(t *testing.T) { Name: "instance1", DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -2486,14 +2438,14 @@ func TestCopyConfigurationResources(t *testing.T) { } func TestGenerateBackupJobIntent(t *testing.T) { + ctx := context.Background() t.Run("empty", func(t *testing.T) { - spec, err := generateBackupJobSpecIntent( + spec := generateBackupJobSpecIntent(ctx, &v1beta1.PostgresCluster{}, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) - assert.Assert(t, 
marshalMatches(spec.Template.Spec, ` + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: - /opt/crunchy/bin/pgbackrest @@ -2505,10 +2457,10 @@ containers: - name: COMPARE_HASH value: "true" - name: CONTAINER - value: database + value: pgbackrest - name: NAMESPACE - name: SELECTOR - value: postgres-operator.crunchydata.com/cluster=,postgres-operator.crunchydata.com/instance,postgres-operator.crunchydata.com/role=master + value: postgres-operator.crunchydata.com/cluster=,postgres-operator.crunchydata.com/pgbackrest=,postgres-operator.crunchydata.com/pgbackrest-dedicated= name: pgbackrest resources: {} securityContext: @@ -2535,11 +2487,23 @@ volumes: sources: - configMap: items: - - key: pgbackrest_instance.conf - path: pgbackrest_instance.conf + - key: pgbackrest_repo.conf + path: pgbackrest_repo.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: -pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: -pgbackrest `)) }) @@ -2549,12 +2513,11 @@ volumes: ImagePullPolicy: corev1.PullAlways, }, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Equal(t, job.Template.Spec.Containers[0].ImagePullPolicy, corev1.PullAlways) }) @@ -2565,12 +2528,11 @@ volumes: cluster.Spec.Backups = v1beta1.Backups{ PGBackRest: v1beta1.PGBackRestArchive{}, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, corev1.ResourceRequirements{}) }) @@ -2583,12 +2545,11 @@ volumes: }, }, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Containers[0].Resources, corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -2623,12 +2584,11 @@ volumes: }, }, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Equal(t, job.Template.Spec.Affinity, affinity) }) @@ -2637,12 +2597,11 @@ volumes: cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ PriorityClassName: initialize.String("some-priority-class"), } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Equal(t, job.Template.Spec.PriorityClassName, "some-priority-class") }) @@ -2656,12 +2615,11 @@ volumes: cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ Tolerations: tolerations, } - job, err := generateBackupJobSpecIntent( + job := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.DeepEqual(t, job.Template.Spec.Tolerations, tolerations) }) @@ -2671,18 +2629,16 @@ volumes: t.Run("Undefined", func(t *testing.T) { cluster.Spec.Backups.PGBackRest.Jobs = nil - spec, err := generateBackupJobSpecIntent( + spec := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, 
err) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{} - spec, err = generateBackupJobSpecIntent( + spec = generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) }) @@ -2691,10 +2647,9 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(0), } - spec, err := generateBackupJobSpecIntent( + spec := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(0)) } @@ -2705,10 +2660,9 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(100), } - spec, err := generateBackupJobSpecIntent( + spec := generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) - assert.NilError(t, err) if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { assert.Equal(t, *spec.TTLSecondsAfterFinished, int32(100)) } @@ -2720,16 +2674,17 @@ func TestGenerateRepoHostIntent(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 0) + ctx := context.Background() r := Reconciler{Client: cc} t.Run("empty", func(t *testing.T) { - _, err := r.generateRepoHostIntent(&v1beta1.PostgresCluster{}, "", &RepoResources{}, + _, err := r.generateRepoHostIntent(ctx, &v1beta1.PostgresCluster{}, "", &RepoResources{}, &observedInstances{}) assert.NilError(t, err) }) cluster := &v1beta1.PostgresCluster{} - sts, err := r.generateRepoHostIntent(cluster, "", &RepoResources{}, &observedInstances{}) + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, &observedInstances{}) assert.NilError(t, err) t.Run("ServiceAccount", func(t *testing.T) { @@ -2750,7 +2705,7 @@ func TestGenerateRepoHostIntent(t *testing.T) { }, } observed := &observedInstances{forCluster: []*Instance{{Pods: []*corev1.Pod{{}}}}} - sts, err := r.generateRepoHostIntent(cluster, "", &RepoResources{}, observed) + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed) assert.NilError(t, err) assert.Equal(t, *sts.Spec.Replicas, int32(1)) }) @@ -2762,7 +2717,7 @@ func TestGenerateRepoHostIntent(t *testing.T) { }, } observed := &observedInstances{forCluster: []*Instance{{}}} - sts, err := r.generateRepoHostIntent(cluster, "", &RepoResources{}, observed) + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed) assert.NilError(t, err) assert.Equal(t, *sts.Spec.Replicas, int32(0)) }) @@ -3627,23 +3582,18 @@ func TestReconcileScheduledBackups(t *testing.T) { // if an event is expected, the check for it if tc.expectedEventReason != "" { - events := &corev1.EventList{} - err := wait.Poll(time.Second/2, Scale(time.Second*2), func() (bool, error) { - if err := tClient.List(ctx, events, &client.MatchingFields{ - "involvedObject.kind": "PostgresCluster", - "involvedObject.name": clusterName, - "involvedObject.namespace": ns.GetName(), - "involvedObject.uid": string(postgresCluster.GetUID()), - "reason": tc.expectedEventReason, - }); err != nil { - return false, err - } - if len(events.Items) != 1 { - return false, nil - } - return true, nil - }) - assert.NilError(t, err) + assert.NilError(t, wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*2), false, + func(ctx context.Context) (bool, error) { + events := &corev1.EventList{} + err := tClient.List(ctx, events, &client.MatchingFields{ + "involvedObject.kind": "PostgresCluster", 
+ "involvedObject.name": clusterName, + "involvedObject.namespace": ns.GetName(), + "involvedObject.uid": string(postgresCluster.GetUID()), + "reason": tc.expectedEventReason, + }) + return len(events.Items) == 1, err + })) } } else if !tc.expectReconcile && tc.expectRequeue { // expect requeue, no reconcile @@ -3786,3 +3736,167 @@ func TestSetScheduledJobStatus(t *testing.T) { assert.Assert(t, len(postgresCluster.Status.PGBackRest.ScheduledBackups) == 0) }) } + +func TestBackupsEnabled(t *testing.T) { + // Garbage collector cleans up test resources before the test completes + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") + } + + cfg, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 2) + + r := &Reconciler{} + ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) { + r = &Reconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor(ControllerName), + Tracer: otel.Tracer(ControllerName), + Owner: ControllerName, + } + }) + t.Cleanup(func() { teardownManager(cancel, t) }) + + t.Run("Cluster with backups, no sts can be reconciled", func(t *testing.T) { + clusterName := "hippocluster1" + clusterUID := "hippouid1" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) + + t.Run("Cluster with backups, sts can be reconciled", func(t *testing.T) { + clusterName := "hippocluster2" + clusterUID := "hippouid2" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } + + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + _, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + assert.NilError(t, err) + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) + + t.Run("Cluster with no backups, no sts can reconcile", func(t *testing.T) { + // create a PostgresCluster to test with + clusterName := "hippocluster3" + clusterUID := "hippouid3" + + ns := setupNamespace(t, tClient) + + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + postgresCluster.Spec.Backups = v1beta1.Backups{} + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, !backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) + + t.Run("Cluster with no backups, sts cannot be reconciled", func(t *testing.T) { + clusterName := "hippocluster4" + clusterUID := "hippouid4" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := 
fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } + + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + _, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + assert.NilError(t, err) + + postgresCluster.Spec.Backups = v1beta1.Backups{} + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, !backupsSpecFound) + assert.Assert(t, !backupsReconciliationAllowed) + }) + + t.Run("Cluster with no backups, sts, annotation can be reconciled", func(t *testing.T) { + clusterName := "hippocluster5" + clusterUID := "hippouid5" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + + // create the 'observed' instances and set the leader + instances := &observedInstances{ + forCluster: []*Instance{{Name: "instance1", + Pods: []*corev1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{naming.LabelRole: naming.RolePatroniLeader}, + }, + Spec: corev1.PodSpec{}, + }}, + }, {Name: "instance2"}, {Name: "instance3"}}, + } + + rootCA, err := pki.NewRootCertificateAuthority() + assert.NilError(t, err) + + _, err = r.reconcilePGBackRest(ctx, postgresCluster, instances, rootCA, true) + assert.NilError(t, err) + + postgresCluster.Spec.Backups = v1beta1.Backups{} + annotations := map[string]string{ + naming.AuthorizeBackupRemovalAnnotation: "true", + } + postgresCluster.Annotations = annotations + + backupsSpecFound, backupsReconciliationAllowed, err := r.BackupsEnabled(ctx, postgresCluster) + + assert.NilError(t, err) + assert.Assert(t, !backupsSpecFound) + assert.Assert(t, backupsReconciliationAllowed) + }) +} diff --git a/internal/controller/postgrescluster/pgbouncer.go b/internal/controller/postgrescluster/pgbouncer.go index 9234b9f2a0..76207fac02 100644 --- a/internal/controller/postgrescluster/pgbouncer.go +++ b/internal/controller/postgrescluster/pgbouncer.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -181,8 +170,8 @@ func (r *Reconciler) reconcilePGBouncerInPostgreSQL( if err == nil { ctx := logging.NewContext(ctx, logging.FromContext(ctx).WithValues("revision", revision)) - err = action(ctx, func(_ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + err = action(ctx, func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) }) } if err == nil { @@ -315,12 +304,8 @@ func (r *Reconciler) generatePGBouncerService( } servicePort.NodePort = *spec.NodePort } - if spec.ExternalTrafficPolicy != nil { - service.Spec.ExternalTrafficPolicy = *spec.ExternalTrafficPolicy - } - if spec.InternalTrafficPolicy != nil { - service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy - } + service.Spec.ExternalTrafficPolicy = initialize.FromPointer(spec.ExternalTrafficPolicy) + service.Spec.InternalTrafficPolicy = spec.InternalTrafficPolicy } service.Spec.Ports = []corev1.ServicePort{servicePort} @@ -357,7 +342,7 @@ func (r *Reconciler) reconcilePGBouncerService( // generatePGBouncerDeployment returns an appsv1.Deployment that runs PgBouncer pods. func (r *Reconciler) generatePGBouncerDeployment( - cluster *v1beta1.PostgresCluster, + ctx context.Context, cluster *v1beta1.PostgresCluster, primaryCertificate *corev1.SecretProjection, configmap *corev1.ConfigMap, secret *corev1.Secret, ) (*appsv1.Deployment, bool, error) { @@ -410,25 +395,20 @@ func (r *Reconciler) generatePGBouncerDeployment( // - https://docs.k8s.io/concepts/workloads/controllers/deployment/#rolling-update-deployment deploy.Spec.Strategy.Type = appsv1.RollingUpdateDeploymentStrategyType deploy.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{ - MaxUnavailable: intstr.ValueOrDefault(nil, intstr.FromInt(0)), + MaxUnavailable: initialize.Pointer(intstr.FromInt32(0)), } // Use scheduling constraints from the cluster spec. deploy.Spec.Template.Spec.Affinity = cluster.Spec.Proxy.PGBouncer.Affinity deploy.Spec.Template.Spec.Tolerations = cluster.Spec.Proxy.PGBouncer.Tolerations - - if cluster.Spec.Proxy.PGBouncer.PriorityClassName != nil { - deploy.Spec.Template.Spec.PriorityClassName = *cluster.Spec.Proxy.PGBouncer.PriorityClassName - } - + deploy.Spec.Template.Spec.PriorityClassName = + initialize.FromPointer(cluster.Spec.Proxy.PGBouncer.PriorityClassName) deploy.Spec.Template.Spec.TopologySpreadConstraints = cluster.Spec.Proxy.PGBouncer.TopologySpreadConstraints // if default pod scheduling is not explicitly disabled, add the default // pod topology spread constraints - if cluster.Spec.DisableDefaultPodScheduling == nil || - (cluster.Spec.DisableDefaultPodScheduling != nil && - !*cluster.Spec.DisableDefaultPodScheduling) { + if !initialize.FromPointer(cluster.Spec.DisableDefaultPodScheduling) { deploy.Spec.Template.Spec.TopologySpreadConstraints = append( deploy.Spec.Template.Spec.TopologySpreadConstraints, defaultTopologySpreadConstraints(*deploy.Spec.Selector)...) 
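The pgbouncer.go hunks above replace explicit nil checks with initialize.Pointer and initialize.FromPointer from internal/initialize. Those implementations are not part of this diff; the following is only a rough sketch, assuming they are plain generic helpers, to show why call sites such as PriorityClassName and ExternalTrafficPolicy no longer need a nil guard.

package initialize

// Pointer returns a pointer to a copy of v, e.g. Pointer(intstr.FromInt32(0)).
func Pointer[T any](v T) *T { return &v }

// FromPointer dereferences p, falling back to T's zero value when p is nil,
// so callers can assign optional spec fields without an explicit nil check.
func FromPointer[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

Under that assumption, assigning initialize.FromPointer(cluster.Spec.Proxy.PGBouncer.PriorityClassName) yields an empty string when the field is unset, which matches the previous behavior of skipping the assignment entirely.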
@@ -461,7 +441,7 @@ func (r *Reconciler) generatePGBouncerDeployment( err := errors.WithStack(r.setControllerReference(cluster, deploy)) if err == nil { - pgbouncer.Pod(cluster, configmap, primaryCertificate, secret, &deploy.Spec.Template.Spec) + pgbouncer.Pod(ctx, cluster, configmap, primaryCertificate, secret, &deploy.Spec.Template.Spec) } return deploy, true, err @@ -477,7 +457,7 @@ func (r *Reconciler) reconcilePGBouncerDeployment( configmap *corev1.ConfigMap, secret *corev1.Secret, ) error { deploy, specified, err := r.generatePGBouncerDeployment( - cluster, primaryCertificate, configmap, secret) + ctx, cluster, primaryCertificate, configmap, secret) // Set observations whether the deployment exists or not. defer func() { diff --git a/internal/controller/postgrescluster/pgbouncer_test.go b/internal/controller/postgrescluster/pgbouncer_test.go index ed9361bb7e..9bbced5247 100644 --- a/internal/controller/postgrescluster/pgbouncer_test.go +++ b/internal/controller/postgrescluster/pgbouncer_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -26,11 +15,13 @@ import ( policyv1 "k8s.io/api/policy/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -59,7 +50,7 @@ func TestGeneratePGBouncerService(t *testing.T) { assert.NilError(t, err) assert.Assert(t, !specified) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null name: pg7-pgbouncer namespace: ns5 @@ -74,11 +65,11 @@ namespace: ns5 } alwaysExpect := func(t testing.TB, service *corev1.Service) { - assert.Assert(t, marshalMatches(service.TypeMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.TypeMeta, ` apiVersion: v1 kind: Service `)) - assert.Assert(t, marshalMatches(service.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta, ` creationTimestamp: null labels: postgres-operator.crunchydata.com/cluster: pg7 @@ -172,7 +163,7 @@ ownerReferences: alwaysExpect(t, service) // Defaults to ClusterIP. 
assert.Equal(t, service.Spec.Type, corev1.ServiceTypeClusterIP) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgbouncer port: 9651 protocol: TCP @@ -205,7 +196,7 @@ ownerReferences: assert.Assert(t, specified) alwaysExpect(t, service) test.Expect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgbouncer port: 9651 protocol: TCP @@ -230,7 +221,7 @@ ownerReferences: assert.NilError(t, err) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeNodePort) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgbouncer nodePort: 32001 port: 9651 @@ -243,7 +234,7 @@ ownerReferences: assert.NilError(t, err) assert.Equal(t, service.Spec.Type, corev1.ServiceTypeLoadBalancer) alwaysExpect(t, service) - assert.Assert(t, marshalMatches(service.Spec.Ports, ` + assert.Assert(t, cmp.MarshalMatches(service.Spec.Ports, ` - name: pgbouncer nodePort: 32002 port: 9651 @@ -377,6 +368,7 @@ func TestGeneratePGBouncerDeployment(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 0) + ctx := context.Background() reconciler := &Reconciler{Client: cc} cluster := &v1beta1.PostgresCluster{} @@ -390,11 +382,11 @@ func TestGeneratePGBouncerDeployment(t *testing.T) { cluster := cluster.DeepCopy() cluster.Spec.Proxy = spec - deploy, specified, err := reconciler.generatePGBouncerDeployment(cluster, nil, nil, nil) + deploy, specified, err := reconciler.generatePGBouncerDeployment(ctx, cluster, nil, nil, nil) assert.NilError(t, err) assert.Assert(t, !specified) - assert.Assert(t, marshalMatches(deploy.ObjectMeta, ` + assert.Assert(t, cmp.MarshalMatches(deploy.ObjectMeta, ` creationTimestamp: null name: test-cluster-pgbouncer namespace: ns3 @@ -423,7 +415,7 @@ namespace: ns3 } deploy, specified, err := reconciler.generatePGBouncerDeployment( - cluster, primary, configmap, secret) + ctx, cluster, primary, configmap, secret) assert.NilError(t, err) assert.Assert(t, specified) @@ -463,7 +455,7 @@ namespace: ns3 t.Run("PodSpec", func(t *testing.T) { deploy, specified, err := reconciler.generatePGBouncerDeployment( - cluster, primary, configmap, secret) + ctx, cluster, primary, configmap, secret) assert.NilError(t, err) assert.Assert(t, specified) @@ -479,7 +471,7 @@ namespace: ns3 // topology spread constraints and spec.disableDefaultPodScheduling being // set to true (as done in instance StatefulSet tests). 
- assert.Assert(t, marshalMatches(deploy.Spec.Template.Spec, ` + assert.Assert(t, cmp.MarshalMatches(deploy.Spec.Template.Spec, ` automountServiceAccountToken: false containers: null enableServiceLinks: false @@ -509,7 +501,7 @@ topologySpreadConstraints: cluster.Spec.DisableDefaultPodScheduling = initialize.Bool(true) deploy, specified, err := reconciler.generatePGBouncerDeployment( - cluster, primary, configmap, secret) + ctx, cluster, primary, configmap, secret) assert.NilError(t, err) assert.Assert(t, specified) @@ -560,7 +552,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringInt32(0) + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) assert.Assert(t, !foundPDB(cluster)) }) @@ -569,7 +561,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringInt32(1) + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -578,7 +570,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { assert.Assert(t, foundPDB(cluster)) t.Run("deleted", func(t *testing.T) { - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringInt32(0) + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(0)) err := r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update @@ -596,7 +588,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1) - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringString("50%") + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("50%")) assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -605,7 +597,7 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { assert.Assert(t, foundPDB(cluster)) t.Run("deleted", func(t *testing.T) { - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringString("0%") + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("0%")) err := r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update @@ -619,13 +611,13 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) { }) t.Run("delete with 00%", func(t *testing.T) { - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringString("50%") + cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("50%")) assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster)) assert.Assert(t, foundPDB(cluster)) t.Run("deleted", func(t *testing.T) { - cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.IntOrStringString("00%") + cluster.Spec.Proxy.PGBouncer.MinAvailable = 
initialize.Pointer(intstr.FromString("00%")) err := r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster) if apierrors.IsConflict(err) { // When running in an existing environment another controller will sometimes update diff --git a/internal/controller/postgrescluster/pgmonitor.go b/internal/controller/postgrescluster/pgmonitor.go index e0bec9d4ed..e1b5186cb4 100644 --- a/internal/controller/postgrescluster/pgmonitor.go +++ b/internal/controller/postgrescluster/pgmonitor.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -27,6 +16,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -144,9 +134,9 @@ func (r *Reconciler) reconcilePGMonitorExporter(ctx context.Context, // Apply the necessary SQL and record its hash in cluster.Status if err == nil { - err = action(ctx, func(_ context.Context, stdin io.Reader, + err = action(ctx, func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { - return r.PodExec(writablePod.Namespace, writablePod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + return r.PodExec(ctx, writablePod.Namespace, writablePod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) }) } if err == nil { @@ -240,11 +230,12 @@ func (r *Reconciler) reconcileMonitoringSecret( // addPGMonitorToInstancePodSpec performs the necessary setup to add // pgMonitor resources on a PodTemplateSpec func addPGMonitorToInstancePodSpec( + ctx context.Context, cluster *v1beta1.PostgresCluster, template *corev1.PodTemplateSpec, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap) error { - err := addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, exporterWebConfig) + err := addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, exporterWebConfig) return err } @@ -255,6 +246,7 @@ func addPGMonitorToInstancePodSpec( // the exporter container cannot be created; Testing relies on ensuring the // monitoring secret is available func addPGMonitorExporterToInstancePodSpec( + ctx context.Context, cluster *v1beta1.PostgresCluster, template *corev1.PodTemplateSpec, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap) error { @@ -267,13 +259,34 @@ func addPGMonitorExporterToInstancePodSpec( withBuiltInCollectors := !strings.EqualFold(cluster.Annotations[naming.PostgresExporterCollectorsAnnotation], "None") + var cmd []string + // PG 17 does not include some of the columns found in stat_bgwriter with older PGs. 
+ // Selectively turn off the collector for stat_bgwriter in PG 17, unless the user + // requests all collectors to be turned off. + switch { + case cluster.Spec.PostgresVersion == 17 && withBuiltInCollectors && certSecret == nil: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors, + pgmonitor.ExporterDeactivateStatBGWriterFlag) + case cluster.Spec.PostgresVersion == 17 && withBuiltInCollectors && certSecret != nil: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors, + pgmonitor.ExporterWebConfigFileFlag, + pgmonitor.ExporterDeactivateStatBGWriterFlag) + // If you're turning off all built-in collectors, we don't care which + // version of PG you're using. + case certSecret != nil: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors, + pgmonitor.ExporterWebConfigFileFlag) + default: + cmd = pgmonitor.ExporterStartCommand(withBuiltInCollectors) + } + securityContext := initialize.RestrictedSecurityContext() exporterContainer := corev1.Container{ Name: naming.ContainerPGMonitorExporter, Image: config.PGExporterContainerImage(cluster), ImagePullPolicy: cluster.Spec.ImagePullPolicy, Resources: cluster.Spec.Monitoring.PGMonitor.Exporter.Resources, - Command: pgmonitor.ExporterStartCommand(withBuiltInCollectors), + Command: cmd, Env: []corev1.EnvVar{ {Name: "DATA_SOURCE_URI", Value: fmt.Sprintf("%s:%d/%s", pgmonitor.ExporterHost, *cluster.Spec.Port, pgmonitor.ExporterDB)}, {Name: "DATA_SOURCE_USER", Value: pgmonitor.MonitoringUser}, @@ -323,7 +336,7 @@ func addPGMonitorExporterToInstancePodSpec( // Therefore, we only want to add the default queries ConfigMap as a source for the // "exporter-config" volume if the AppendCustomQueries feature gate is turned on OR if the // user has not provided any custom configuration. - if util.DefaultMutableFeatureGate.Enabled(util.AppendCustomQueries) || + if feature.Enabled(ctx, feature.AppendCustomQueries) || cluster.Spec.Monitoring.PGMonitor.Exporter.Configuration == nil { defaultConfigVolumeProjection := corev1.VolumeProjection{ @@ -365,8 +378,6 @@ func addPGMonitorExporterToInstancePodSpec( }} exporterContainer.VolumeMounts = append(exporterContainer.VolumeMounts, mounts...) - exporterContainer.Command = pgmonitor.ExporterStartCommand( - withBuiltInCollectors, pgmonitor.ExporterWebConfigFileFlag) } template.Spec.Containers = append(template.Spec.Containers, exporterContainer) diff --git a/internal/controller/postgrescluster/pgmonitor_test.go b/internal/controller/postgrescluster/pgmonitor_test.go index 4549e5a523..8d8c8281d0 100644 --- a/internal/controller/postgrescluster/pgmonitor_test.go +++ b/internal/controller/postgrescluster/pgmonitor_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -31,15 +20,15 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -func testExporterCollectorsAnnotation(t *testing.T, cluster *v1beta1.PostgresCluster, queriesConfig, webConfig *corev1.ConfigMap) { +func testExporterCollectorsAnnotation(t *testing.T, ctx context.Context, cluster *v1beta1.PostgresCluster, queriesConfig, webConfig *corev1.ConfigMap) { t.Helper() t.Run("ExporterCollectorsAnnotation", func(t *testing.T) { @@ -50,7 +39,7 @@ func testExporterCollectorsAnnotation(t *testing.T, cluster *v1beta1.PostgresClu naming.PostgresExporterCollectorsAnnotation: "wrong-value", }) - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, queriesConfig, webConfig)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, queriesConfig, webConfig)) assert.Equal(t, len(template.Spec.Containers), 1) container := template.Spec.Containers[0] @@ -67,7 +56,7 @@ func testExporterCollectorsAnnotation(t *testing.T, cluster *v1beta1.PostgresClu naming.PostgresExporterCollectorsAnnotation: "None", }) - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, queriesConfig, webConfig)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, queriesConfig, webConfig)) assert.Equal(t, len(template.Spec.Containers), 1) container := template.Spec.Containers[0] @@ -82,7 +71,7 @@ func testExporterCollectorsAnnotation(t *testing.T, cluster *v1beta1.PostgresClu naming.PostgresExporterCollectorsAnnotation: "none", }) - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, queriesConfig, webConfig)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, queriesConfig, webConfig)) assert.Assert(t, cmp.Contains(strings.Join(template.Spec.Containers[0].Command, "\n"), "--[no-]collector")) }) }) @@ -90,6 +79,9 @@ func testExporterCollectorsAnnotation(t *testing.T, cluster *v1beta1.PostgresClu } func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { + t.Parallel() + + ctx := context.Background() image := "test/image:tag" cluster := &v1beta1.PostgresCluster{} @@ -108,13 +100,11 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { t.Run("ExporterDisabled", func(t *testing.T) { template := &corev1.PodTemplateSpec{} - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, nil, nil)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, nil, nil)) assert.DeepEqual(t, template, &corev1.PodTemplateSpec{}) }) t.Run("ExporterEnabled", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.AppendCustomQueries+"=false"))) - cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ PGMonitor: &v1beta1.PGMonitorSpec{ Exporter: &v1beta1.ExporterSpec{ @@ -131,7 +121,7 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) { }, } - 
assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, nil)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, nil)) assert.Equal(t, len(template.Spec.Containers), 2) container := template.Spec.Containers[1] @@ -189,12 +179,10 @@ volumeMounts: secretName: pg1-monitoring `)) - testExporterCollectorsAnnotation(t, cluster, exporterQueriesConfig, nil) + testExporterCollectorsAnnotation(t, ctx, cluster, exporterQueriesConfig, nil) }) t.Run("CustomConfigAppendCustomQueriesOff", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.AppendCustomQueries+"=false"))) - cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ PGMonitor: &v1beta1.PGMonitorSpec{ Exporter: &v1beta1.ExporterSpec{ @@ -217,7 +205,7 @@ volumeMounts: }, } - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, nil)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, nil)) assert.Equal(t, len(template.Spec.Containers), 2) container := template.Spec.Containers[1] @@ -239,7 +227,11 @@ name: exporter-config }) t.Run("CustomConfigAppendCustomQueriesOn", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.AppendCustomQueries+"=true"))) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.AppendCustomQueries: true, + })) + ctx := feature.NewContext(ctx, gate) cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ PGMonitor: &v1beta1.PGMonitorSpec{ @@ -263,7 +255,7 @@ name: exporter-config }, } - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, nil)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, nil)) assert.Equal(t, len(template.Spec.Containers), 2) container := template.Spec.Containers[1] @@ -287,8 +279,6 @@ name: exporter-config }) t.Run("CustomTLS", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.AppendCustomQueries+"=false"))) - cluster.Spec.Monitoring = &v1beta1.MonitoringSpec{ PGMonitor: &v1beta1.PGMonitorSpec{ Exporter: &v1beta1.ExporterSpec{ @@ -311,7 +301,7 @@ name: exporter-config testConfigMap := new(corev1.ConfigMap) testConfigMap.Name = "test-web-conf" - assert.NilError(t, addPGMonitorExporterToInstancePodSpec(cluster, template, exporterQueriesConfig, testConfigMap)) + assert.NilError(t, addPGMonitorExporterToInstancePodSpec(ctx, cluster, template, exporterQueriesConfig, testConfigMap)) assert.Equal(t, len(template.Spec.Containers), 2) container := template.Spec.Containers[1] @@ -340,7 +330,7 @@ name: exporter-config assert.Assert(t, cmp.Contains(command, "postgres_exporter")) assert.Assert(t, cmp.Contains(command, "--web.config.file")) - testExporterCollectorsAnnotation(t, cluster, exporterQueriesConfig, testConfigMap) + testExporterCollectorsAnnotation(t, ctx, cluster, exporterQueriesConfig, testConfigMap) }) } @@ -506,8 +496,8 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { ctx := context.Background() var called bool reconciler := &Reconciler{ - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, @@ -530,8 +520,8 @@ func 
TestReconcilePGMonitorExporter(t *testing.T) { ctx := context.Background() var called bool reconciler := &Reconciler{ - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, @@ -612,7 +602,7 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { podExecCalled: false, // Status was generated manually for this test case // TODO (jmckulk): add code to generate status - status: v1beta1.MonitoringStatus{ExporterConfiguration: "7cdb484b6c"}, + status: v1beta1.MonitoringStatus{ExporterConfiguration: "6d874c58df"}, statusChangedAfterReconcile: false, }} { t.Run(test.name, func(t *testing.T) { @@ -624,8 +614,8 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { // Create reconciler with mock PodExec function reconciler := &Reconciler{ - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, diff --git a/internal/controller/postgrescluster/pki.go b/internal/controller/postgrescluster/pki.go index fd769cce7d..0314ad4406 100644 --- a/internal/controller/postgrescluster/pki.go +++ b/internal/controller/postgrescluster/pki.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go index fe6bc12320..c2fe7af82a 100644 --- a/internal/controller/postgrescluster/pki_test.go +++ b/internal/controller/postgrescluster/pki_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/pod_disruption_budget.go b/internal/controller/postgrescluster/pod_disruption_budget.go index 56ac388fa2..4bff4a9743 100644 --- a/internal/controller/postgrescluster/pod_disruption_budget.go +++ b/internal/controller/postgrescluster/pod_disruption_budget.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -75,5 +64,5 @@ func getMinAvailable( } // If more than one replica is not defined, we will default to '0' - return initialize.IntOrStringInt32(expect) + return initialize.Pointer(intstr.FromInt32(expect)) } diff --git a/internal/controller/postgrescluster/pod_disruption_budget_test.go b/internal/controller/postgrescluster/pod_disruption_budget_test.go index 434d11f4ed..55e2bb63c6 100644 --- a/internal/controller/postgrescluster/pod_disruption_budget_test.go +++ b/internal/controller/postgrescluster/pod_disruption_budget_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -61,7 +50,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { "anno-key": "anno-value", }, } - minAvailable = initialize.IntOrStringInt32(1) + minAvailable = initialize.Pointer(intstr.FromInt32(1)) selector := metav1.LabelSelector{ MatchLabels: map[string]string{ "key": "value", @@ -89,19 +78,19 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { func TestGetMinAvailable(t *testing.T) { t.Run("minAvailable provided", func(t *testing.T) { // minAvailable is defined so use that value - ma := initialize.IntOrStringInt32(0) + ma := initialize.Pointer(intstr.FromInt32(0)) expect := getMinAvailable(ma, 1) assert.Equal(t, *expect, intstr.FromInt(0)) - ma = initialize.IntOrStringInt32(1) + ma = initialize.Pointer(intstr.FromInt32(1)) expect = getMinAvailable(ma, 2) assert.Equal(t, *expect, intstr.FromInt(1)) - ma = initialize.IntOrStringString("50%") + ma = initialize.Pointer(intstr.FromString("50%")) expect = getMinAvailable(ma, 3) assert.Equal(t, *expect, intstr.FromString("50%")) - ma = initialize.IntOrStringString("200%") + ma = initialize.Pointer(intstr.FromString("200%")) expect = getMinAvailable(ma, 2147483647) assert.Equal(t, *expect, intstr.FromString("200%")) }) diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index 227a3b6458..312079d824 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -29,11 +18,13 @@ import ( "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" @@ -54,7 +45,7 @@ func (r *Reconciler) generatePostgresUserSecret( username := string(spec.Name) intent := &corev1.Secret{ObjectMeta: naming.PostgresUserSecret(cluster, username)} intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) - initialize.ByteMap(&intent.Data) + initialize.Map(&intent.Data) // Populate the Secret with libpq keywords for connecting through // the primary Service. 
@@ -198,14 +189,14 @@ func (r *Reconciler) reconcilePostgresDatabases( ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) } // Gather the list of database that should exist in PostgreSQL. - databases := sets.String{} + databases := sets.Set[string]{} if cluster.Spec.Users == nil { // Users are unspecified; create one database matching the cluster name // if it is also a valid database name. @@ -230,8 +221,6 @@ func (r *Reconciler) reconcilePostgresDatabases( } } - // Calculate a hash of the SQL that should be executed in PostgreSQL. - var pgAuditOK, postgisInstallOK bool create := func(ctx context.Context, exec postgres.Executor) error { if pgAuditOK = pgaudit.EnableInPostgreSQL(ctx, exec) == nil; !pgAuditOK { @@ -255,9 +244,10 @@ func (r *Reconciler) reconcilePostgresDatabases( "Unable to install PostGIS") } - return postgres.CreateDatabasesInPostgreSQL(ctx, exec, databases.List()) + return postgres.CreateDatabasesInPostgreSQL(ctx, exec, sets.List(databases)) } + // Calculate a hash of the SQL that should be executed in PostgreSQL. revision, err := safeHash32(func(hasher io.Writer) error { // Discard log messages about executing SQL. return create(logging.NewContext(ctx, logging.Discard()), func( @@ -515,9 +505,9 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) } break } @@ -534,7 +524,7 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( } write := func(ctx context.Context, exec postgres.Executor) error { - return postgres.WriteUsersInPostgreSQL(ctx, exec, specUsers, verifiers) + return postgres.WriteUsersInPostgreSQL(ctx, cluster, exec, specUsers, verifiers) } revision, err := safeHash32(func(hasher io.Writer) error { @@ -579,7 +569,7 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( func (r *Reconciler) reconcilePostgresDataVolume( ctx context.Context, cluster *v1beta1.PostgresCluster, instanceSpec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, - clusterVolumes []corev1.PersistentVolumeClaim, + clusterVolumes []corev1.PersistentVolumeClaim, sourceCluster *v1beta1.PostgresCluster, ) (*corev1.PersistentVolumeClaim, error) { labelMap := map[string]string{ @@ -620,6 +610,38 @@ func (r *Reconciler) reconcilePostgresDataVolume( pvc.Spec = instanceSpec.DataVolumeClaimSpec + // If a source cluster was provided and VolumeSnapshots are turned on in the source cluster and + // there is a VolumeSnapshot available for the source cluster that is ReadyToUse, use it as the + // source for the PVC. If there is an error when retrieving VolumeSnapshots, or no ReadyToUse + // snapshots were found, create a warning event, but continue creating PVC in the usual fashion. 
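+ // When a ready snapshot is found, the claim created below carries a dataSource
+ // of the form (kind and name are taken from the snapshot itself):
+ //
+ //   dataSource:
+ //     apiGroup: snapshot.storage.k8s.io
+ //     kind: VolumeSnapshot
+ //     name: <latest ready snapshot>
+ //
+ // which Kubernetes uses to pre-populate the new volume from that snapshot.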
+ if sourceCluster != nil && sourceCluster.Spec.Backups.Snapshots != nil && feature.Enabled(ctx, feature.VolumeSnapshots) { + snapshots, err := r.getSnapshotsForCluster(ctx, sourceCluster) + if err == nil { + snapshot := getLatestReadySnapshot(snapshots) + if snapshot != nil { + r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "BootstrappingWithSnapshot", + "Snapshot found for %v; bootstrapping cluster with snapshot.", sourceCluster.Name) + pvc.Spec.DataSource = &corev1.TypedLocalObjectReference{ + APIGroup: initialize.String("snapshot.storage.k8s.io"), + Kind: snapshot.Kind, + Name: snapshot.Name, + } + } else { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "SnapshotNotFound", + "No ReadyToUse snapshots were found for %v; proceeding with typical restore process.", sourceCluster.Name) + } + } else { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "SnapshotNotFound", + "Could not get snapshots for %v, proceeding with typical restore process.", sourceCluster.Name) + } + } + + r.setVolumeSize(ctx, cluster, pvc, instanceSpec.Name) + + // Clear any set limit before applying PVC. This is needed to allow the limit + // value to change later. + pvc.Spec.Resources.Limits = nil + if err == nil { err = r.handlePersistentVolumeClaimError(cluster, errors.WithStack(r.apply(ctx, pvc))) @@ -628,6 +650,75 @@ func (r *Reconciler) reconcilePostgresDataVolume( return pvc, err } +// setVolumeSize compares the potential sizes from the instance spec, status +// and limit and sets the appropriate current value. +func (r *Reconciler) setVolumeSize(ctx context.Context, cluster *v1beta1.PostgresCluster, + pvc *corev1.PersistentVolumeClaim, instanceSpecName string) { + log := logging.FromContext(ctx) + + // Store the limit for this instance set. This value will not change below. + volumeLimitFromSpec := pvc.Spec.Resources.Limits.Storage() + + // Capture the largest pgData volume size currently defined for a given instance set. + // This value will capture our desired update. + volumeRequestSize := pvc.Spec.Resources.Requests.Storage() + + // If the request value is greater than the set limit, use the limit and issue + // a warning event. A limit of 0 is ignorned. + if !volumeLimitFromSpec.IsZero() && + volumeRequestSize.Value() > volumeLimitFromSpec.Value() { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "VolumeRequestOverLimit", + "pgData volume request (%v) for %s/%s is greater than set limit (%v). Limit value will be used.", + volumeRequestSize, cluster.Name, instanceSpecName, volumeLimitFromSpec) + + pvc.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceStorage: *resource.NewQuantity(volumeLimitFromSpec.Value(), resource.BinarySI), + } + // Otherwise, if the limit is not set or the feature gate is not enabled, do not autogrow. + } else if !volumeLimitFromSpec.IsZero() && feature.Enabled(ctx, feature.AutoGrowVolumes) { + for i := range cluster.Status.InstanceSets { + if instanceSpecName == cluster.Status.InstanceSets[i].Name { + for _, dpv := range cluster.Status.InstanceSets[i].DesiredPGDataVolume { + if dpv != "" { + desiredRequest, err := resource.ParseQuantity(dpv) + if err == nil { + if desiredRequest.Value() > volumeRequestSize.Value() { + volumeRequestSize = &desiredRequest + } + } else { + log.Error(err, "Unable to parse volume request: "+dpv) + } + } + } + } + } + + // If the volume request size is greater than or equal to the limit and the + // limit is not zero, update the request size to the limit value. 
+ // If the user manually requests a lower limit that is smaller than the current + // or requested volume size, it will be ignored in favor of the limit value. + if volumeRequestSize.Value() >= volumeLimitFromSpec.Value() { + + r.Recorder.Eventf(cluster, corev1.EventTypeNormal, "VolumeLimitReached", + "pgData volume(s) for %s/%s are at size limit (%v).", cluster.Name, + instanceSpecName, volumeLimitFromSpec) + + // If the volume size request is greater than the limit, issue an + // additional event warning. + if volumeRequestSize.Value() > volumeLimitFromSpec.Value() { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "DesiredVolumeAboveLimit", + "The desired size (%v) for the %s/%s pgData volume(s) is greater than the size limit (%v).", + volumeRequestSize, cluster.Name, instanceSpecName, volumeLimitFromSpec) + } + + volumeRequestSize = volumeLimitFromSpec + } + pvc.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceStorage: *resource.NewQuantity(volumeRequestSize.Value(), resource.BinarySI), + } + } +} + // +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,patch} // reconcileTablespaceVolumes writes the PersistentVolumeClaims for instance's @@ -638,7 +729,7 @@ func (r *Reconciler) reconcileTablespaceVolumes( clusterVolumes []corev1.PersistentVolumeClaim, ) (tablespaceVolumes []*corev1.PersistentVolumeClaim, err error) { - if !util.DefaultMutableFeatureGate.Enabled(util.TablespaceVolumes) { + if !feature.Enabled(ctx, feature.TablespaceVolumes) { return } @@ -765,7 +856,7 @@ func (r *Reconciler) reconcilePostgresWALVolume( // This assumes that $PGDATA matches the configured PostgreSQL "data_directory". var stdout bytes.Buffer err = errors.WithStack(r.PodExec( - observed.Pods[0].Namespace, observed.Pods[0].Name, naming.ContainerDatabase, + ctx, observed.Pods[0].Namespace, observed.Pods[0].Name, naming.ContainerDatabase, nil, &stdout, nil, "bash", "-ceu", "--", `exec realpath "${PGDATA}/pg_wal"`)) walDirectory = strings.TrimRight(stdout.String(), "\n") @@ -869,9 +960,9 @@ func (r *Reconciler) reconcileDatabaseInitSQL(ctx context.Context, } podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, naming.ContainerDatabase, stdin, stdout, stderr, command...) } // A writable pod executor has been found and we have the sql provided by diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index 84a380f011..0780b0f577 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -21,18 +10,23 @@ import ( "io" "testing" + "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/postgres" "github.com/crunchydata/postgres-operator/internal/testing/cmp" @@ -247,26 +241,116 @@ func TestReconcilePostgresVolumes(t *testing.T) { Owner: client.FieldOwner(t.Name()), } - cluster := testCluster() - cluster.Namespace = setupNamespace(t, tClient).Name + t.Run("DataVolumeNoSourceCluster", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name - assert.NilError(t, tClient.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) - spec := &v1beta1.PostgresInstanceSetSpec{} - assert.NilError(t, yaml.Unmarshal([]byte(`{ - name: "some-instance", - dataVolumeClaimSpec: { - accessModes: [ReadWriteOnce], - resources: { requests: { storage: 1Gi } }, - storageClassName: "storage-class-for-data", - }, - }`), spec)) + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + + pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil, nil) + assert.NilError(t, err) + + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + + assert.Equal(t, pvc.Labels[naming.LabelCluster], cluster.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstance], instance.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) + assert.Equal(t, pvc.Labels[naming.LabelRole], "pgdata") + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +storageClassName: storage-class-for-data +volumeMode: Filesystem + `)) + }) - instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + t.Run("DataVolumeSourceClusterWithGoodSnapshot", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name - t.Run("DataVolume", func(t *testing.T) { - pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil) + assert.NilError(t, tClient.Create(ctx, cluster)) + 
t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler.Recorder = recorder + + // Turn on VolumeSnapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx := feature.NewContext(ctx, gate) + + // Create source cluster and enable snapshots + sourceCluster := testCluster() + sourceCluster.Namespace = ns.Name + sourceCluster.Name = "rhino" + sourceCluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "some-class-name", + } + + // Create a snapshot + snapshot := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "rhino", + }, + }, + } + snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := reconciler.apply(ctx, snapshot) + assert.NilError(t, err) + + // Get snapshot and update Status.ReadyToUse and CreationTime + err = reconciler.Client.Get(ctx, client.ObjectKeyFromObject(snapshot), snapshot) + assert.NilError(t, err) + + currentTime := metav1.Now() + snapshot.Status = &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(true), + CreationTime: ¤tTime, + } + err = reconciler.Client.Status().Update(ctx, snapshot) + assert.NilError(t, err) + + // Reconcile volume + pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil, sourceCluster) assert.NilError(t, err) assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) @@ -276,18 +360,111 @@ func TestReconcilePostgresVolumes(t *testing.T) { assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) assert.Equal(t, pvc.Labels[naming.LabelRole], "pgdata") - assert.Assert(t, marshalMatches(pvc.Spec, ` + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` accessModes: - ReadWriteOnce +dataSource: + apiGroup: snapshot.storage.k8s.io + kind: VolumeSnapshot + name: some-snapshot +dataSourceRef: + apiGroup: snapshot.storage.k8s.io + kind: VolumeSnapshot + name: some-snapshot resources: requests: storage: 1Gi storageClassName: storage-class-for-data volumeMode: Filesystem `)) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "BootstrappingWithSnapshot") + assert.Equal(t, recorder.Events[0].Note, "Snapshot found for rhino; bootstrapping cluster with snapshot.") + }) + + t.Run("DataVolumeSourceClusterSnapshotsEnabledNoSnapshots", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name + + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + 
accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler.Recorder = recorder + + // Turn on VolumeSnapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx := feature.NewContext(ctx, gate) + + // Create source cluster and enable snapshots + sourceCluster := testCluster() + sourceCluster.Namespace = ns.Name + sourceCluster.Name = "rhino" + sourceCluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "some-class-name", + } + + // Reconcile volume + pvc, err := reconciler.reconcilePostgresDataVolume(ctx, cluster, spec, instance, nil, sourceCluster) + assert.NilError(t, err) + + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + + assert.Equal(t, pvc.Labels[naming.LabelCluster], cluster.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstance], instance.Name) + assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) + assert.Equal(t, pvc.Labels[naming.LabelRole], "pgdata") + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +storageClassName: storage-class-for-data +volumeMode: Filesystem + `)) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "SnapshotNotFound") + assert.Equal(t, recorder.Events[0].Note, "No ReadyToUse snapshots were found for rhino; proceeding with typical restore process.") }) t.Run("WALVolume", func(t *testing.T) { + cluster := testCluster() + ns := setupNamespace(t, tClient) + cluster.Namespace = ns.Name + + assert.NilError(t, tClient.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, tClient.Delete(ctx, cluster)) }) + + spec := &v1beta1.PostgresInstanceSetSpec{} + assert.NilError(t, yaml.Unmarshal([]byte(`{ + name: "some-instance", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Gi } }, + storageClassName: "storage-class-for-data", + }, + }`), spec)) + instance := &appsv1.StatefulSet{ObjectMeta: naming.GenerateInstance(cluster, spec)} + observed := &Instance{} t.Run("None", func(t *testing.T) { @@ -316,7 +493,7 @@ volumeMode: Filesystem assert.Equal(t, pvc.Labels[naming.LabelInstanceSet], spec.Name) assert.Equal(t, pvc.Labels[naming.LabelRole], "pgwal") - assert.Assert(t, marshalMatches(pvc.Spec, ` + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` accessModes: - ReadWriteMany resources: @@ -355,7 +532,7 @@ volumeMode: Filesystem expected := errors.New("flop") reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, _ io.Reader, _, _ io.Writer, command ...string, ) error { assert.Equal(t, namespace, "pod-ns") @@ -372,7 +549,7 @@ volumeMode: Filesystem // Files are in the wrong place; expect no changes to the PVC. 
reconciler.PodExec = func( - _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, + ctx context.Context, _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, ) error { assert.Assert(t, stdout != nil) _, err := stdout.Write([]byte("some-place\n")) @@ -395,7 +572,7 @@ volumeMode: Filesystem new(corev1.ContainerStateRunning) reconciler.PodExec = func( - _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, + ctx context.Context, _, _, _ string, _ io.Reader, stdout, _ io.Writer, _ ...string, ) error { assert.Assert(t, stdout != nil) _, err := stdout.Write([]byte(postgres.WALDirectory(cluster, spec) + "\n")) @@ -425,6 +602,318 @@ volumeMode: Filesystem }) } +func TestSetVolumeSize(t *testing.T) { + t.Parallel() + + ctx := context.Background() + cluster := v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "elephant", + Namespace: "test-namespace", + }, + Spec: v1beta1.PostgresClusterSpec{ + InstanceSets: []v1beta1.PostgresInstanceSetSpec{{ + Name: "some-instance", + Replicas: initialize.Int32(1), + }}, + }, + } + + instance := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "elephant-some-instance-wxyz-0", + Namespace: cluster.Namespace, + }} + + setupLogCapture := func(ctx context.Context) (context.Context, *[]string) { + calls := []string{} + testlog := funcr.NewJSON(func(object string) { + calls = append(calls, object) + }, funcr.Options{ + Verbosity: 1, + }) + return logging.NewContext(ctx, testlog), &calls + } + + // helper functions + instanceSetSpec := func(request, limit string) *v1beta1.PostgresInstanceSetSpec { + return &v1beta1.PostgresInstanceSetSpec{ + Name: "some-instance", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse(request), + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse(limit), + }}}} + } + + desiredStatus := func(request string) v1beta1.PostgresClusterStatus { + desiredMap := make(map[string]string) + desiredMap["elephant-some-instance-wxyz-0"] = request + return v1beta1.PostgresClusterStatus{ + InstanceSets: []v1beta1.PostgresInstanceSetStatus{{ + Name: "some-instance", + DesiredPGDataVolume: desiredMap, + }}} + } + + t.Run("RequestAboveLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("4Gi", "3Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 3Gi + requests: + storage: 3Gi +`)) + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeRequestOverLimit") + assert.Equal(t, recorder.Events[0].Note, "pgData volume request (4Gi) for elephant/some-instance is greater than set limit (3Gi). 
Limit value will be used.") + }) + + t.Run("NoFeatureGate", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "3Gi") + + desiredMap := make(map[string]string) + desiredMap["elephant-some-instance-wxyz-0"] = "2Gi" + cluster.Status = v1beta1.PostgresClusterStatus{ + InstanceSets: []v1beta1.PostgresInstanceSetStatus{{ + Name: "some-instance", + DesiredPGDataVolume: desiredMap, + }}, + } + + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 3Gi + requests: + storage: 1Gi + `)) + + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + + // clear status for other tests + cluster.Status = v1beta1.PostgresClusterStatus{} + }) + + t.Run("FeatureEnabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.AutoGrowVolumes: true, + })) + ctx := feature.NewContext(ctx, gate) + + t.Run("StatusNoLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := &v1beta1.PostgresInstanceSetSpec{ + Name: "some-instance", + DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }}}} + cluster.Status = desiredStatus("2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + requests: + storage: 1Gi +`)) + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + + // clear status for other tests + cluster.Status = v1beta1.PostgresClusterStatus{} + }) + + t.Run("LimitNoStatus", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 2Gi + requests: + storage: 1Gi +`)) + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + }) + + t.Run("BadStatusWithLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "3Gi") + cluster.Status = desiredStatus("NotAValidValue") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce 
+resources: + limits: + storage: 3Gi + requests: + storage: 1Gi +`)) + + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 1) + assert.Assert(t, cmp.Contains((*logs)[0], "Unable to parse volume request: NotAValidValue")) + }) + + t.Run("StatusWithLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "3Gi") + cluster.Status = desiredStatus("2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 3Gi + requests: + storage: 2Gi +`)) + assert.Equal(t, len(recorder.Events), 0) + assert.Equal(t, len(*logs), 0) + }) + + t.Run("StatusWithLimitGrowToLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("1Gi", "2Gi") + cluster.Status = desiredStatus("2Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 2Gi + requests: + storage: 2Gi +`)) + + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "VolumeLimitReached") + assert.Equal(t, recorder.Events[0].Note, "pgData volume(s) for elephant/some-instance are at size limit (2Gi).") + }) + + t.Run("DesiredStatusOverLimit", func(t *testing.T) { + recorder := events.NewRecorder(t, runtime.Scheme) + reconciler := &Reconciler{Recorder: recorder} + ctx, logs := setupLogCapture(ctx) + + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.InstancePostgresDataVolume(instance)} + spec := instanceSetSpec("4Gi", "5Gi") + cluster.Status = desiredStatus("10Gi") + pvc.Spec = spec.DataVolumeClaimSpec + + reconciler.setVolumeSize(ctx, &cluster, pvc, spec.Name) + + assert.Assert(t, cmp.MarshalMatches(pvc.Spec, ` +accessModes: +- ReadWriteOnce +resources: + limits: + storage: 5Gi + requests: + storage: 5Gi +`)) + + assert.Equal(t, len(*logs), 0) + assert.Equal(t, len(recorder.Events), 2) + var found1, found2 bool + for _, event := range recorder.Events { + if event.Reason == "VolumeLimitReached" { + found1 = true + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Note, "pgData volume(s) for elephant/some-instance are at size limit (5Gi).") + } + if event.Reason == "DesiredVolumeAboveLimit" { + found2 = true + assert.Equal(t, event.Regarding.Name, cluster.Name) + assert.Equal(t, event.Note, + "The desired size (10Gi) for the elephant/some-instance pgData volume(s) is greater than the size limit (5Gi).") + } + } + assert.Assert(t, found1 && found2) + }) + + }) +} + func TestReconcileDatabaseInitSQL(t *testing.T) { ctx := context.Background() var called bool @@ -438,8 +927,8 @@ func TestReconcileDatabaseInitSQL(t *testing.T) { // Overwrite the PodExec function with a check to ensure the exec // call would have been made - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - 
stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, @@ -562,8 +1051,8 @@ func TestReconcileDatabaseInitSQLConfigMap(t *testing.T) { // Overwrite the PodExec function with a check to ensure the exec // call would have been made - PodExec: func(namespace, pod, container string, stdin io.Reader, stdout, - stderr io.Writer, command ...string) error { + PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, + stdout, stderr io.Writer, command ...string) error { called = true return nil }, diff --git a/internal/controller/postgrescluster/rbac.go b/internal/controller/postgrescluster/rbac.go index 80c7ccf678..38dd808c44 100644 --- a/internal/controller/postgrescluster/rbac.go +++ b/internal/controller/postgrescluster/rbac.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go new file mode 100644 index 0000000000..76ad195600 --- /dev/null +++ b/internal/controller/postgrescluster/snapshots.go @@ -0,0 +1,617 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/pgbackrest" + "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +//+kubebuilder:rbac:groups="snapshot.storage.k8s.io",resources="volumesnapshots",verbs={get,list,create,patch,delete} + +// The controller-runtime client sets up a cache that watches anything we "get" or "list". +//+kubebuilder:rbac:groups="snapshot.storage.k8s.io",resources="volumesnapshots",verbs={watch} + +// reconcileVolumeSnapshots creates and manages VolumeSnapshots if the proper VolumeSnapshot CRDs +// are installed and VolumeSnapshots are enabled for the PostgresCluster. 
A VolumeSnapshot of the +// primary instance's pgdata volume will be created whenever a backup is completed. The steps to +// create snapshots include the following sequence: +// 1. We find the latest completed backup job and check the timestamp. +// 2. If the timestamp is later than what's on the dedicated snapshot PVC, a restore job runs in +// the dedicated snapshot volume. +// 3. When the restore job completes, an annotation is updated on the PVC. If the restore job +// fails, we don't run it again. +// 4. When the PVC annotation is updated, we see if there's a volume snapshot with an earlier +// timestamp. +// 5. If there are no snapshots at all, we take a snapshot and put the backup job's completion +// timestamp on the snapshot annotation. +// 6. If an earlier snapshot is found, we take a new snapshot, annotate it and delete the old +// snapshot. +// 7. When the snapshot job completes, we delete the restore job. +func (r *Reconciler) reconcileVolumeSnapshots(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster, pvc *corev1.PersistentVolumeClaim) error { + + // If VolumeSnapshots feature gate is disabled. Do nothing and return early. + if !feature.Enabled(ctx, feature.VolumeSnapshots) { + return nil + } + + // Check if the Kube cluster has VolumeSnapshots installed. If VolumeSnapshots + // are not installed, we need to return early. If user is attempting to use + // VolumeSnapshots, return an error, otherwise return nil. + volumeSnapshotKindExists, err := r.GroupVersionKindExists("snapshot.storage.k8s.io/v1", "VolumeSnapshot") + if err != nil { + return err + } + if !*volumeSnapshotKindExists { + if postgrescluster.Spec.Backups.Snapshots != nil { + return errors.New("VolumeSnapshots are not installed/enabled in this Kubernetes cluster; cannot create snapshot.") + } else { + return nil + } + } + + // If user is attempting to use snapshots and has tablespaces enabled, we + // need to create a warning event indicating that the two features are not + // currently compatible and return early. + if postgrescluster.Spec.Backups.Snapshots != nil && + clusterUsingTablespaces(ctx, postgrescluster) { + r.Recorder.Event(postgrescluster, corev1.EventTypeWarning, "IncompatibleFeatures", + "VolumeSnapshots not currently compatible with TablespaceVolumes; cannot create snapshot.") + return nil + } + + // Get all snapshots for the cluster. + snapshots, err := r.getSnapshotsForCluster(ctx, postgrescluster) + if err != nil { + return err + } + + // If snapshots are disabled, delete any existing snapshots and return early. + if postgrescluster.Spec.Backups.Snapshots == nil { + return r.deleteSnapshots(ctx, postgrescluster, snapshots) + } + + // If we got here, then the snapshots are enabled (feature gate is enabled and the + // cluster has a Spec.Backups.Snapshots section defined). + + // Check snapshots for errors; if present, create an event. If there are + // multiple snapshots with errors, create event for the latest error and + // delete any older snapshots with error. 
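+ // "Latest" is determined by Status.Error.Time; older errored snapshots controlled by
+ // this PostgresCluster are deleted so only the most recent failure is kept.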
+ snapshotWithLatestError := getSnapshotWithLatestError(snapshots) + if snapshotWithLatestError != nil { + r.Recorder.Event(postgrescluster, corev1.EventTypeWarning, "VolumeSnapshotError", + *snapshotWithLatestError.Status.Error.Message) + for _, snapshot := range snapshots.Items { + if snapshot.Status != nil && snapshot.Status.Error != nil && + snapshot.Status.Error.Time.Before(snapshotWithLatestError.Status.Error.Time) { + err = r.deleteControlled(ctx, postgrescluster, &snapshot) + if err != nil { + return err + } + } + } + } + + // Get pvc backup job completion annotation. If it does not exist, there has not been + // a successful restore yet, so return early. + pvcUpdateTimeStamp, pvcAnnotationExists := pvc.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + if !pvcAnnotationExists { + return err + } + + // Check to see if snapshot exists for the latest backup that has been restored into + // the dedicated pvc. + var snapshotForPvcUpdateIdx int + snapshotFoundForPvcUpdate := false + for idx, snapshot := range snapshots.Items { + if snapshot.GetAnnotations()[naming.PGBackRestBackupJobCompletion] == pvcUpdateTimeStamp { + snapshotForPvcUpdateIdx = idx + snapshotFoundForPvcUpdate = true + } + } + + // If a snapshot exists for the latest backup that has been restored into the dedicated pvc + // and the snapshot is Ready, delete all other snapshots. + if snapshotFoundForPvcUpdate && snapshots.Items[snapshotForPvcUpdateIdx].Status.ReadyToUse != nil && + *snapshots.Items[snapshotForPvcUpdateIdx].Status.ReadyToUse { + for idx, snapshot := range snapshots.Items { + if idx != snapshotForPvcUpdateIdx { + err = r.deleteControlled(ctx, postgrescluster, &snapshot) + if err != nil { + return err + } + } + } + } + + // If a snapshot for the latest backup/restore does not exist, create a snapshot. + if !snapshotFoundForPvcUpdate { + var snapshot *volumesnapshotv1.VolumeSnapshot + snapshot, err = r.generateSnapshotOfDedicatedSnapshotVolume(postgrescluster, pvc) + if err == nil { + err = errors.WithStack(r.apply(ctx, snapshot)) + } + } + + return err +} + +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={get} +// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={create,delete,patch} + +// reconcileDedicatedSnapshotVolume reconciles the PersistentVolumeClaim that holds a +// copy of the pgdata and is dedicated for clean snapshots of the database. It creates +// and manages the volume as well as the restore jobs that bring the volume data forward +// after a successful backup. +func (r *Reconciler) reconcileDedicatedSnapshotVolume( + ctx context.Context, cluster *v1beta1.PostgresCluster, + clusterVolumes []corev1.PersistentVolumeClaim, +) (*corev1.PersistentVolumeClaim, error) { + + // If VolumeSnapshots feature gate is disabled, do nothing and return early. + if !feature.Enabled(ctx, feature.VolumeSnapshots) { + return nil, nil + } + + // Set appropriate labels for dedicated snapshot volume + labelMap := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleSnapshot, + naming.LabelData: naming.DataPostgres, + } + + // If volume already exists, use existing name. Otherwise, generate a name. 
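+ // An existing claim is matched by the cluster, role, and data labels set above;
+ // a new claim takes its name from naming.ClusterDedicatedSnapshotVolume.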
+ var pvc *corev1.PersistentVolumeClaim + existingPVCName, err := getPGPVCName(labelMap, clusterVolumes) + if err != nil { + return nil, errors.WithStack(err) + } + if existingPVCName != "" { + pvc = &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: existingPVCName, + }} + } else { + pvc = &corev1.PersistentVolumeClaim{ObjectMeta: naming.ClusterDedicatedSnapshotVolume(cluster)} + } + pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + + // If snapshots are disabled, delete the PVC if it exists and return early. + // Check the client cache first using Get. + if cluster.Spec.Backups.Snapshots == nil { + key := client.ObjectKeyFromObject(pvc) + err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + if err == nil { + err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc)) + } + return nil, client.IgnoreNotFound(err) + } + + // If we've got this far, snapshots are enabled so we should create/update/get + // the dedicated snapshot volume + pvc, err = r.createDedicatedSnapshotVolume(ctx, cluster, labelMap, pvc) + if err != nil { + return pvc, err + } + + // Determine if we need to run a restore job, based on the most recent backup + // and an annotation on the PVC. + + // Find the most recently completed backup job. + backupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + if err != nil { + return pvc, err + } + + // Return early if no complete backup job is found. + if backupJob == nil { + return pvc, nil + } + + // Return early if the pvc is annotated with a timestamp newer or equal to the latest backup job. + // If the annotation value cannot be parsed, we want to proceed with a restore. + pvcAnnotationTimestampString := pvc.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + if pvcAnnotationTime, err := time.Parse(time.RFC3339, pvcAnnotationTimestampString); err == nil { + if backupJob.Status.CompletionTime.Compare(pvcAnnotationTime) <= 0 { + return pvc, nil + } + } + + // If we've made it here, the pvc has not been restored with latest backup. + // Find the dedicated snapshot volume restore job if it exists. Since we delete + // successful restores after we annotate the PVC and stop making restore jobs + // if a failed DSV restore job exists, there should only ever be one DSV restore + // job in existence at a time. + // TODO(snapshots): Should this function throw an error or something if multiple + // DSV restores somehow exist? + restoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + if err != nil { + return pvc, err + } + + // If we don't find a restore job, we run one. + if restoreJob == nil { + err = r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob) + return pvc, err + } + + // If we've made it here, we have found a restore job. If the restore job was + // successful, set/update the annotation on the PVC and delete the restore job. 
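+ // The value copied onto the PVC is the naming.PGBackRestBackupJobCompletion annotation
+ // from the restore job, applied with a JSON merge patch; the finished restore job is
+ // then deleted with background propagation.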
+ if restoreJob.Status.Succeeded == 1 { + if pvc.GetAnnotations() == nil { + pvc.Annotations = map[string]string{} + } + pvc.Annotations[naming.PGBackRestBackupJobCompletion] = restoreJob.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + annotations := fmt.Sprintf(`{"metadata":{"annotations":{"%s": "%s"}}}`, + naming.PGBackRestBackupJobCompletion, pvc.Annotations[naming.PGBackRestBackupJobCompletion]) + + patch := client.RawPatch(client.Merge.Type(), []byte(annotations)) + err = r.handlePersistentVolumeClaimError(cluster, + errors.WithStack(r.patch(ctx, pvc, patch))) + + if err != nil { + return pvc, err + } + + err = r.Client.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground)) + return pvc, errors.WithStack(err) + } + + // If the restore job failed, create a warning event. + if restoreJob.Status.Failed == 1 { + r.Recorder.Event(cluster, corev1.EventTypeWarning, + "DedicatedSnapshotVolumeRestoreJobError", "restore job failed, check the logs") + return pvc, nil + } + + // If we made it here, the restore job is still running and we should do nothing. + return pvc, err +} + +// createDedicatedSnapshotVolume creates/updates/gets the dedicated snapshot volume. +// It expects that the volume name and GVK has already been set on the pvc that is passed in. +func (r *Reconciler) createDedicatedSnapshotVolume(ctx context.Context, + cluster *v1beta1.PostgresCluster, labelMap map[string]string, + pvc *corev1.PersistentVolumeClaim, +) (*corev1.PersistentVolumeClaim, error) { + var err error + + // An InstanceSet must be chosen to scale resources for the dedicated snapshot volume. + // TODO: We've chosen the first InstanceSet for the time being, but might want to consider + // making the choice configurable. + instanceSpec := cluster.Spec.InstanceSets[0] + + pvc.Annotations = naming.Merge( + cluster.Spec.Metadata.GetAnnotationsOrNil(), + instanceSpec.Metadata.GetAnnotationsOrNil()) + + pvc.Labels = naming.Merge( + cluster.Spec.Metadata.GetLabelsOrNil(), + instanceSpec.Metadata.GetLabelsOrNil(), + labelMap, + ) + + err = errors.WithStack(r.setControllerReference(cluster, pvc)) + if err != nil { + return pvc, err + } + + pvc.Spec = instanceSpec.DataVolumeClaimSpec + + // Set the snapshot volume to the same size as the pgdata volume. The size should scale with auto-grow. + r.setVolumeSize(ctx, cluster, pvc, instanceSpec.Name) + + // Clear any set limit before applying PVC. This is needed to allow the limit + // value to change later. + pvc.Spec.Resources.Limits = nil + + err = r.handlePersistentVolumeClaimError(cluster, + errors.WithStack(r.apply(ctx, pvc))) + if err != nil { + return pvc, err + } + + return pvc, err +} + +// dedicatedSnapshotVolumeRestore creates a Job that performs a restore into the dedicated +// snapshot volume. +// This function is very similar to reconcileRestoreJob, but specifically tailored to the +// dedicated snapshot volume. 
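+// The job performs a pgBackRest delta restore into the dedicated snapshot PVC, is attempted
+// exactly once (BackoffLimit 0, RestartPolicy Never), and is annotated with the source
+// backup job's completion time (naming.PGBackRestBackupJobCompletion).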
+func (r *Reconciler) dedicatedSnapshotVolumeRestore(ctx context.Context, + cluster *v1beta1.PostgresCluster, dedicatedSnapshotVolume *corev1.PersistentVolumeClaim, + backupJob *batchv1.Job, +) error { + + pgdata := postgres.DataDirectory(cluster) + repoName := backupJob.GetLabels()[naming.LabelPGBackRestRepo] + + opts := []string{ + "--stanza=" + pgbackrest.DefaultStanzaName, + "--pg1-path=" + pgdata, + "--repo=" + regexRepoIndex.FindString(repoName), + "--delta", + } + + cmd := pgbackrest.DedicatedSnapshotVolumeRestoreCommand(pgdata, strings.Join(opts, " ")) + + // Create the volume resources required for the Postgres data directory. + dataVolumeMount := postgres.DataVolumeMount() + dataVolume := corev1.Volume{ + Name: dataVolumeMount.Name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: dedicatedSnapshotVolume.GetName(), + }, + }, + } + volumes := []corev1.Volume{dataVolume} + volumeMounts := []corev1.VolumeMount{dataVolumeMount} + + _, configHash, err := pgbackrest.CalculateConfigHashes(cluster) + if err != nil { + return err + } + + // A DataSource is required to avoid a nil pointer exception. + fakeDataSource := &v1beta1.PostgresClusterDataSource{RepoName: ""} + + restoreJob := &batchv1.Job{} + instanceName := cluster.Status.StartupInstance + + if err := r.generateRestoreJobIntent(cluster, configHash, instanceName, cmd, + volumeMounts, volumes, fakeDataSource, restoreJob); err != nil { + return errors.WithStack(err) + } + + // Attempt the restore exactly once. If the restore job fails, we prompt the user to investigate. + restoreJob.Spec.BackoffLimit = initialize.Int32(0) + restoreJob.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever + + // Add pgBackRest configs to template. + pgbackrest.AddConfigToRestorePod(cluster, cluster, &restoreJob.Spec.Template.Spec) + + // Add nss_wrapper init container and add nss_wrapper env vars to the pgbackrest restore container. + addNSSWrapper( + config.PGBackRestContainerImage(cluster), + cluster.Spec.ImagePullPolicy, + &restoreJob.Spec.Template) + + addTMPEmptyDir(&restoreJob.Spec.Template) + + restoreJob.Annotations[naming.PGBackRestBackupJobCompletion] = backupJob.Status.CompletionTime.Format(time.RFC3339) + return errors.WithStack(r.apply(ctx, restoreJob)) +} + +// generateSnapshotOfDedicatedSnapshotVolume will generate a VolumeSnapshot of +// the dedicated snapshot PersistentVolumeClaim and annotate it with the +// provided backup job's UID. +func (r *Reconciler) generateSnapshotOfDedicatedSnapshotVolume( + postgrescluster *v1beta1.PostgresCluster, + dedicatedSnapshotVolume *corev1.PersistentVolumeClaim, +) (*volumesnapshotv1.VolumeSnapshot, error) { + + snapshot, err := r.generateVolumeSnapshot(postgrescluster, *dedicatedSnapshotVolume, + postgrescluster.Spec.Backups.Snapshots.VolumeSnapshotClassName) + if err == nil { + if snapshot.Annotations == nil { + snapshot.Annotations = map[string]string{} + } + snapshot.Annotations[naming.PGBackRestBackupJobCompletion] = dedicatedSnapshotVolume.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + } + + return snapshot, err +} + +// generateVolumeSnapshot generates a VolumeSnapshot that will use the supplied +// PersistentVolumeClaim and VolumeSnapshotClassName and will set the provided +// PostgresCluster as the owner. 
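+// The snapshot is labeled with the cluster name (naming.LabelCluster) so that it can be
+// listed per cluster later, for example by getSnapshotsForCluster.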
+func (r *Reconciler) generateVolumeSnapshot(postgrescluster *v1beta1.PostgresCluster, + pvc corev1.PersistentVolumeClaim, volumeSnapshotClassName string, +) (*volumesnapshotv1.VolumeSnapshot, error) { + + snapshot := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: naming.ClusterVolumeSnapshot(postgrescluster), + } + snapshot.Spec.Source.PersistentVolumeClaimName = &pvc.Name + snapshot.Spec.VolumeSnapshotClassName = &volumeSnapshotClassName + + snapshot.Annotations = postgrescluster.Spec.Metadata.GetAnnotationsOrNil() + snapshot.Labels = naming.Merge(postgrescluster.Spec.Metadata.GetLabelsOrNil(), + map[string]string{ + naming.LabelCluster: postgrescluster.Name, + }) + + err := errors.WithStack(r.setControllerReference(postgrescluster, snapshot)) + + return snapshot, err +} + +// getDedicatedSnapshotVolumeRestoreJob finds a dedicated snapshot volume (DSV) +// restore job if one exists. Since we delete successful restore jobs and stop +// creating new restore jobs when one fails, there should only ever be one DSV +// restore job present at a time. If a DSV restore cannot be found, we return nil. +func (r *Reconciler) getDedicatedSnapshotVolumeRestoreJob(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster) (*batchv1.Job, error) { + + // Get all restore jobs for this cluster + jobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(postgrescluster.Name)) + if err == nil { + err = errors.WithStack( + r.Client.List(ctx, jobs, + client.InNamespace(postgrescluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + } + if err != nil { + return nil, err + } + + // Get restore job that has PGBackRestBackupJobCompletion annotation + for _, job := range jobs.Items { + _, annotationExists := job.GetAnnotations()[naming.PGBackRestBackupJobCompletion] + if annotationExists { + return &job, nil + } + } + + return nil, nil +} + +// getLatestCompleteBackupJob finds the most recently completed +// backup job for a cluster +func (r *Reconciler) getLatestCompleteBackupJob(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster) (*batchv1.Job, error) { + + // Get all backup jobs for this cluster + jobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterBackupJobs(postgrescluster.Name)) + if err == nil { + err = errors.WithStack( + r.Client.List(ctx, jobs, + client.InNamespace(postgrescluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + } + if err != nil { + return nil, err + } + + zeroTime := metav1.NewTime(time.Time{}) + latestCompleteBackupJob := batchv1.Job{ + Status: batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &zeroTime, + }, + } + for _, job := range jobs.Items { + if job.Status.Succeeded > 0 && + latestCompleteBackupJob.Status.CompletionTime.Before(job.Status.CompletionTime) { + latestCompleteBackupJob = job + } + } + + if latestCompleteBackupJob.Status.CompletionTime.Equal(&zeroTime) { + return nil, nil + } + + return &latestCompleteBackupJob, nil +} + +// getSnapshotWithLatestError takes a VolumeSnapshotList and returns a pointer to the +// snapshot that has most recently had an error. If no snapshot errors exist +// then it returns nil. 
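+// The search below seeds its candidate with a zero-valued error timestamp; any snapshot
+// whose Status.Error.Time is later replaces it. If the zero-time sentinel survives the
+// loop, no snapshot reported an error and nil is returned.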
+func getSnapshotWithLatestError(snapshots *volumesnapshotv1.VolumeSnapshotList) *volumesnapshotv1.VolumeSnapshot { + zeroTime := metav1.NewTime(time.Time{}) + snapshotWithLatestError := volumesnapshotv1.VolumeSnapshot{ + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &zeroTime, + }, + }, + } + for _, snapshot := range snapshots.Items { + if snapshot.Status != nil && snapshot.Status.Error != nil && + snapshotWithLatestError.Status.Error.Time.Before(snapshot.Status.Error.Time) { + snapshotWithLatestError = snapshot + } + } + + if snapshotWithLatestError.Status.Error.Time.Equal(&zeroTime) { + return nil + } + + return &snapshotWithLatestError +} + +// getSnapshotsForCluster gets all the VolumeSnapshots for a given postgrescluster. +func (r *Reconciler) getSnapshotsForCluster(ctx context.Context, cluster *v1beta1.PostgresCluster) ( + *volumesnapshotv1.VolumeSnapshotList, error) { + + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + if err != nil { + return nil, err + } + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + + return snapshots, err +} + +// getLatestReadySnapshot takes a VolumeSnapshotList and returns the latest ready VolumeSnapshot. +func getLatestReadySnapshot(snapshots *volumesnapshotv1.VolumeSnapshotList) *volumesnapshotv1.VolumeSnapshot { + zeroTime := metav1.NewTime(time.Time{}) + latestReadySnapshot := volumesnapshotv1.VolumeSnapshot{ + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &zeroTime, + }, + } + for _, snapshot := range snapshots.Items { + if snapshot.Status != nil && snapshot.Status.ReadyToUse != nil && *snapshot.Status.ReadyToUse && + latestReadySnapshot.Status.CreationTime.Before(snapshot.Status.CreationTime) { + latestReadySnapshot = snapshot + } + } + + if latestReadySnapshot.Status.CreationTime.Equal(&zeroTime) { + return nil + } + + return &latestReadySnapshot +} + +// deleteSnapshots takes a postgrescluster and a snapshot list and deletes all snapshots +// in the list that are controlled by the provided postgrescluster. +func (r *Reconciler) deleteSnapshots(ctx context.Context, + postgrescluster *v1beta1.PostgresCluster, snapshots *volumesnapshotv1.VolumeSnapshotList) error { + + for i := range snapshots.Items { + err := errors.WithStack(client.IgnoreNotFound( + r.deleteControlled(ctx, postgrescluster, &snapshots.Items[i]))) + if err != nil { + return err + } + } + return nil +} + +// clusterUsingTablespaces determines whether the TablespaceVolumes feature is enabled and the given +// cluster has tablespace volumes in place. +func clusterUsingTablespaces(ctx context.Context, postgrescluster *v1beta1.PostgresCluster) bool { + for _, instanceSet := range postgrescluster.Spec.InstanceSets { + if len(instanceSet.TablespaceVolumes) > 0 { + return feature.Enabled(ctx, feature.TablespaceVolumes) + } + } + return false +} diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go new file mode 100644 index 0000000000..4c3d987ecd --- /dev/null +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -0,0 +1,1476 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+// +// SPDX-License-Identifier: Apache-2.0 + +package postgrescluster + +import ( + "context" + "testing" + "time" + + "github.com/pkg/errors" + "gotest.tools/v3/assert" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/initialize" + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" +) + +func TestReconcileVolumeSnapshots(t *testing.T) { + ctx := context.Background() + cfg, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + + recorder := events.NewRecorder(t, runtime.Scheme) + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + DiscoveryClient: discoveryClient, + Recorder: recorder, + } + ns := setupNamespace(t, cc) + + // Enable snapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx = feature.NewContext(ctx, gate) + + t.Run("SnapshotsDisabledDeleteSnapshots", func(t *testing.T) { + // Create cluster (without snapshots spec) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create a snapshot + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + volumeSnapshotClassName := "my-snapshotclass" + snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) + assert.NilError(t, err) + err = errors.WithStack(r.apply(ctx, snapshot)) + assert.NilError(t, err) + + // Get all snapshots for this cluster and assert 1 exists + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + + // Reconcile snapshots + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Get all snapshots for this cluster and assert 0 exist + assert.NilError(t, err) + snapshots = &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + 
assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("SnapshotsEnabledTablespacesEnabled", func(t *testing.T) { + // Enable both tablespaces and snapshots feature gates + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + feature.VolumeSnapshots: true, + })) + ctx := feature.NewContext(ctx, gate) + + // Create a cluster with snapshots and tablespaces enabled + volumeSnapshotClassName := "my-snapshotclass" + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + cluster.Spec.InstanceSets[0].TablespaceVolumes = []v1beta1.TablespaceVolume{{ + Name: "volume-1", + }} + + // Create pvc for reconcile + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert warning event was created and has expected attributes + if assert.Check(t, len(recorder.Events) > 0) { + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Regarding.Kind, "PostgresCluster") + assert.Equal(t, recorder.Events[0].Regarding.Name, "hippo") + assert.Equal(t, recorder.Events[0].Reason, "IncompatibleFeatures") + assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "VolumeSnapshots not currently compatible with TablespaceVolumes")) + } + }) + + t.Run("SnapshotsEnabledNoPvcAnnotation", func(t *testing.T) { + // Create a volume snapshot class + volumeSnapshotClassName := "my-snapshotclass" + volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeSnapshotClassName, + }, + DeletionPolicy: "Delete", + } + assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + + // Create a cluster with snapshots enabled + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create pvc for reconcile + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert no snapshots exist + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("SnapshotsEnabledReadySnapshotsExist", func(t *testing.T) { + // Create a volume snapshot class + volumeSnapshotClassName := "my-snapshotclass" + volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeSnapshotClassName, + }, + DeletionPolicy: "Delete", + } + assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + + // Create a cluster with snapshots enabled + cluster := testCluster() + 
cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create pvc with annotation + pvcName := initialize.String("dedicated-snapshot-volume") + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: *pvcName, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-timestamp", + }, + }, + } + + // Create snapshot with annotation matching the pvc annotation + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "first-snapshot", + Namespace: ns.Name, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-timestamp", + }, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err := errors.WithStack(r.setControllerReference(cluster, snapshot1)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot1) + assert.NilError(t, err) + + // Update snapshot status + truePtr := initialize.Bool(true) + snapshot1.Status = &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: truePtr, + } + err = r.Client.Status().Update(ctx, snapshot1) + assert.NilError(t, err) + + // Create second snapshot with different annotation value + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "second-snapshot", + Namespace: ns.Name, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "older-backup-timestamp", + }, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + // Update second snapshot's status + snapshot2.Status = &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: truePtr, + } + err = r.Client.Status().Update(ctx, snapshot2) + assert.NilError(t, err) + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert first snapshot exists and second snapshot was deleted + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + assert.Equal(t, snapshots.Items[0].Name, "first-snapshot") + + // Cleanup + err = r.deleteControlled(ctx, cluster, snapshot1) + assert.NilError(t, err) + }) + + t.Run("SnapshotsEnabledCreateSnapshot", func(t *testing.T) { + // Create a volume snapshot class + volumeSnapshotClassName := "my-snapshotclass" + volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: volumeSnapshotClassName, + }, + DeletionPolicy: "Delete", + } + assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) }) + + // Create a cluster with snapshots enabled + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: volumeSnapshotClassName, + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create pvc with annotation + pvcName := initialize.String("dedicated-snapshot-volume") + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: *pvcName, + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "another-backup-timestamp", + }, + }, + } + + // Reconcile + err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) + assert.NilError(t, err) + + // Assert that a snapshot was created + selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + snapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, snapshots, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectSnapshots}, + )) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + assert.Equal(t, snapshots.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], + "another-backup-timestamp") + }) +} + +func TestReconcileDedicatedSnapshotVolume(t *testing.T) { + ctx := context.Background() + cfg, cc := setupKubernetes(t) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + + recorder := events.NewRecorder(t, runtime.Scheme) + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + DiscoveryClient: discoveryClient, + Recorder: recorder, + } + + // Enable snapshots feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.VolumeSnapshots: true, + })) + ctx = feature.NewContext(ctx, gate) + + t.Run("SnapshotsDisabledDeletePvc", func(t *testing.T) { + // Create cluster without snapshots spec + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create a dedicated snapshot volume + pvc := &corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + Kind: "PersistentVolumeClaim", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleSnapshot, + naming.LabelData: naming.DataPostgres, + }, + }, + Spec: testVolumeClaimSpec(), + } + err = errors.WithStack(r.setControllerReference(cluster, pvc)) + assert.NilError(t, err) + err = r.apply(ctx, pvc) + assert.NilError(t, err) + + // Assert that the pvc was created + selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + pvcs := &corev1.PersistentVolumeClaimList{} + err = errors.WithStack( + r.Client.List(ctx, pvcs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectPvcs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(pvcs.Items), 
1) + + // Create volumes for reconcile + clusterVolumes := []corev1.PersistentVolumeClaim{*pvc} + + // Reconcile + returned, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Check(t, returned == nil) + + // Assert that the pvc has been deleted or marked for deletion + key, fetched := client.ObjectKeyFromObject(pvc), &corev1.PersistentVolumeClaim{} + if err := r.Client.Get(ctx, key, fetched); err == nil { + assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted") + } else { + assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err) + } + }) + + t.Run("SnapshotsEnabledCreatePvcNoBackupNoRestore", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create volumes for reconcile + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert pvc was created + selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) + assert.NilError(t, err) + pvcs := &corev1.PersistentVolumeClaimList{} + err = errors.WithStack( + r.Client.List(ctx, pvcs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectPvcs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(pvcs.Items), 1) + }) + + t.Run("SnapshotsEnabledBackupExistsCreateRestore", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create successful backup job + backupJob := testBackupJob(cluster) + err = errors.WithStack(r.setControllerReference(cluster, backupJob)) + assert.NilError(t, err) + err = r.apply(ctx, backupJob) + assert.NilError(t, err) + + currentTime := metav1.Now() + backupJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &currentTime, + } + err = r.Client.Status().Update(ctx, backupJob) + assert.NilError(t, err) + + // Create instance set and volumes for reconcile + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert restore job with annotation was created + restoreJobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) + assert.NilError(t, err) + err = errors.WithStack( + r.Client.List(ctx, restoreJobs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(restoreJobs.Items), 1) + assert.Assert(t, 
restoreJobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion] != "") + }) + + t.Run("SnapshotsEnabledSuccessfulRestoreExists", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create times for jobs + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + + // Create successful backup job + backupJob := testBackupJob(cluster) + err = errors.WithStack(r.setControllerReference(cluster, backupJob)) + assert.NilError(t, err) + err = r.apply(ctx, backupJob) + assert.NilError(t, err) + + backupJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &earlierTime, + } + err = r.Client.Status().Update(ctx, backupJob) + assert.NilError(t, err) + + // Create successful restore job + restoreJob := testRestoreJob(cluster) + restoreJob.Annotations = map[string]string{ + naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), + } + err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, err) + err = r.apply(ctx, restoreJob) + assert.NilError(t, err) + + restoreJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &currentTime, + } + err = r.Client.Status().Update(ctx, restoreJob) + assert.NilError(t, err) + + // Create instance set and volumes for reconcile + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert restore job was deleted + restoreJobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) + assert.NilError(t, err) + err = errors.WithStack( + r.Client.List(ctx, restoreJobs, + client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(restoreJobs.Items), 0) + + // Assert pvc was annotated + assert.Equal(t, pvc.GetAnnotations()[naming.PGBackRestBackupJobCompletion], backupJob.Status.CompletionTime.Format(time.RFC3339)) + }) + + t.Run("SnapshotsEnabledFailedRestoreExists", func(t *testing.T) { + // Create cluster with snapshots enabled + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshotclass", + } + assert.NilError(t, r.Client.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + + // Create times for jobs + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + + // Create successful backup job + backupJob := testBackupJob(cluster) + err = errors.WithStack(r.setControllerReference(cluster, backupJob)) + assert.NilError(t, err) + err = r.apply(ctx, backupJob) + assert.NilError(t, err) + + backupJob.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &earlierTime, + } + err = 
r.Client.Status().Update(ctx, backupJob) + assert.NilError(t, err) + + // Create failed restore job + restoreJob := testRestoreJob(cluster) + restoreJob.Annotations = map[string]string{ + naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), + } + err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, err) + err = r.apply(ctx, restoreJob) + assert.NilError(t, err) + + restoreJob.Status = batchv1.JobStatus{ + Succeeded: 0, + Failed: 1, + CompletionTime: &currentTime, + } + err = r.Client.Status().Update(ctx, restoreJob) + assert.NilError(t, err) + + // Setup instances and volumes for reconcile + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + clusterVolumes := []corev1.PersistentVolumeClaim{} + + // Reconcile + pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) + assert.NilError(t, err) + assert.Assert(t, pvc != nil) + + // Assert warning event was created and has expected attributes + if assert.Check(t, len(recorder.Events) > 0) { + assert.Equal(t, recorder.Events[0].Type, "Warning") + assert.Equal(t, recorder.Events[0].Regarding.Kind, "PostgresCluster") + assert.Equal(t, recorder.Events[0].Regarding.Name, "hippo") + assert.Equal(t, recorder.Events[0].Reason, "DedicatedSnapshotVolumeRestoreJobError") + assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "restore job failed, check the logs")) + } + }) +} + +func TestCreateDedicatedSnapshotVolume(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + + labelMap := map[string]string{ + naming.LabelCluster: cluster.Name, + naming.LabelRole: naming.RoleSnapshot, + naming.LabelData: naming.DataPostgres, + } + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: naming.ClusterDedicatedSnapshotVolume(cluster)} + pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) + + pvc, err := r.createDedicatedSnapshotVolume(ctx, cluster, labelMap, pvc) + assert.NilError(t, err) + assert.Assert(t, metav1.IsControlledBy(pvc, cluster)) + assert.Equal(t, pvc.Spec.Resources.Requests[corev1.ResourceStorage], resource.MustParse("1Gi")) +} + +func TestDedicatedSnapshotVolumeRestore(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + + ns := setupNamespace(t, cc) + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + + sts := &appsv1.StatefulSet{} + generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) + currentTime := metav1.Now() + backupJob := testBackupJob(cluster) + backupJob.Status.CompletionTime = &currentTime + + err := r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob) + assert.NilError(t, err) + + // Assert a restore job was created that has the correct annotation + jobs := &batchv1.JobList{} + selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) + assert.NilError(t, err) + err = errors.WithStack( + r.Client.List(ctx, jobs, + 
client.InNamespace(cluster.Namespace), + client.MatchingLabelsSelector{Selector: selectJobs}, + )) + assert.NilError(t, err) + assert.Equal(t, len(jobs.Items), 1) + assert.Equal(t, jobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], + backupJob.Status.CompletionTime.Format(time.RFC3339)) +} + +func TestGenerateSnapshotOfDedicatedSnapshotVolume(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ + VolumeSnapshotClassName: "my-snapshot", + } + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-completion-timestamp", + }, + Name: "dedicated-snapshot-volume", + }, + } + + snapshot, err := r.generateSnapshotOfDedicatedSnapshotVolume(cluster, pvc) + assert.NilError(t, err) + assert.Equal(t, snapshot.GetAnnotations()[naming.PGBackRestBackupJobCompletion], + "backup-completion-timestamp") +} + +func TestGenerateVolumeSnapshot(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dedicated-snapshot-volume", + }, + } + volumeSnapshotClassName := "my-snapshot" + + snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) + assert.NilError(t, err) + assert.Equal(t, *snapshot.Spec.VolumeSnapshotClassName, "my-snapshot") + assert.Equal(t, *snapshot.Spec.Source.PersistentVolumeClaimName, "dedicated-snapshot-volume") + assert.Equal(t, snapshot.Labels[naming.LabelCluster], "hippo") + assert.Equal(t, snapshot.ObjectMeta.OwnerReferences[0].Name, "hippo") +} + +func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + t.Run("NoRestoreJobs", func(t *testing.T) { + dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, dsvRestoreJob == nil) + }) + + t.Run("NoDsvRestoreJobs", func(t *testing.T) { + job1 := testRestoreJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, dsvRestoreJob == nil) + }) + + t.Run("DsvRestoreJobExists", func(t *testing.T) { + job2 := testRestoreJob(cluster) + job2.Name = "restore-job-2" + job2.Namespace = ns.Name + job2.Annotations = map[string]string{ + naming.PGBackRestBackupJobCompletion: "backup-timestamp", + } + + err := r.apply(ctx, job2) + assert.NilError(t, err) + + job3 := testRestoreJob(cluster) + job3.Name = "restore-job-3" + job3.Namespace = ns.Name + + err = r.apply(ctx, job3) + assert.NilError(t, err) + + dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) + assert.NilError(t, err) + assert.Assert(t, dsvRestoreJob != nil) + assert.Equal(t, dsvRestoreJob.Name, "restore-job-2") + }) +} + +func 
TestGetLatestCompleteBackupJob(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + // require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + t.Run("NoJobs", func(t *testing.T) { + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob == nil) + }) + + t.Run("NoCompleteJobs", func(t *testing.T) { + job1 := testBackupJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob == nil) + }) + + t.Run("OneCompleteBackupJob", func(t *testing.T) { + currentTime := metav1.Now() + + job1 := testBackupJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + job2 := testBackupJob(cluster) + job2.Namespace = ns.Name + job2.Name = "backup-job-2" + + err = r.apply(ctx, job2) + assert.NilError(t, err) + + // Get job1 and update Status. + err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) + assert.NilError(t, err) + + job1.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &currentTime, + } + err = r.Client.Status().Update(ctx, job1) + assert.NilError(t, err) + + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob.Name == "backup-job-1") + }) + + t.Run("TwoCompleteBackupJobs", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + assert.Check(t, earlierTime.Before(&currentTime)) + + job1 := testBackupJob(cluster) + job1.Namespace = ns.Name + + err := r.apply(ctx, job1) + assert.NilError(t, err) + + job2 := testBackupJob(cluster) + job2.Namespace = ns.Name + job2.Name = "backup-job-2" + + err = r.apply(ctx, job2) + assert.NilError(t, err) + + // Get job1 and update Status. + err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) + assert.NilError(t, err) + + job1.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &currentTime, + } + err = r.Client.Status().Update(ctx, job1) + assert.NilError(t, err) + + // Get job2 and update Status. 
+ err = r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2) + assert.NilError(t, err) + + job2.Status = batchv1.JobStatus{ + Succeeded: 1, + CompletionTime: &earlierTime, + } + err = r.Client.Status().Update(ctx, job2) + assert.NilError(t, err) + + latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) + assert.NilError(t, err) + assert.Check(t, latestCompleteBackupJob.Name == "backup-job-1") + }) +} + +func TestGetSnapshotWithLatestError(t *testing.T) { + t.Run("NoSnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{} + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + + t.Run("NoSnapshotsWithStatus", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + {}, + {}, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + + t.Run("NoSnapshotsWithErrors", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(true), + }, + }, + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + }, + }, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + + t.Run("OneSnapshotWithError", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "good-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &currentTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &earlierTime, + }, + }, + }, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "bad-snapshot") + }) + + t.Run("TwoSnapshotsWithErrors", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "first-bad-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &earlierTime, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "second-bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &currentTime, + }, + }, + }, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "second-bad-snapshot") + }) +} + +func TestGetSnapshotsForCluster(t *testing.T) { + ctx := context.Background() + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 1) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + } 
+ ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + + t.Run("NoSnapshots", func(t *testing.T) { + snapshots, err := r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("NoSnapshotsForCluster", func(t *testing.T) { + snapshot := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "rhino", + }, + }, + } + snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := r.apply(ctx, snapshot) + assert.NilError(t, err) + + snapshots, err := r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 0) + }) + + t.Run("OneSnapshotForCluster", func(t *testing.T) { + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "rhino", + }, + }, + } + snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "another-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + } + snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") + snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + snapshots, err := r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 1) + assert.Equal(t, snapshots.Items[0].Name, "another-snapshot") + }) + + t.Run("TwoSnapshotsForCluster", func(t *testing.T) { + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "some-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + } + snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") + snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") + err := r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "another-snapshot", + Namespace: ns.Name, + Labels: map[string]string{ + naming.LabelCluster: "hippo", + }, + }, + } + snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") + snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + snapshots, err 
:= r.getSnapshotsForCluster(ctx, cluster) + assert.NilError(t, err) + assert.Equal(t, len(snapshots.Items), 2) + }) +} + +func TestGetLatestReadySnapshot(t *testing.T) { + t.Run("NoSnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{} + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + + t.Run("NoSnapshotsWithStatus", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + {}, + {}, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + + t.Run("NoReadySnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + }, + }, + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + }, + }, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + + t.Run("OneReadySnapshot", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "good-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &earlierTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &currentTime, + ReadyToUse: initialize.Bool(false), + }, + }, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "good-snapshot") + }) + + t.Run("TwoReadySnapshots", func(t *testing.T) { + currentTime := metav1.Now() + earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "first-good-snapshot", + UID: "the-uid-123", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &earlierTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "second-good-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &currentTime, + ReadyToUse: initialize.Bool(true), + }, + }, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "second-good-snapshot") + }) +} + +func TestDeleteSnapshots(t *testing.T) { + ctx := context.Background() + cfg, cc := setupKubernetes(t) + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + assert.NilError(t, err) + + r := &Reconciler{ + Client: cc, + Owner: client.FieldOwner(t.Name()), + DiscoveryClient: discoveryClient, + } + ns := setupNamespace(t, cc) + + cluster := testCluster() + cluster.Namespace = ns.Name + cluster.ObjectMeta.UID = "the-uid-123" + assert.NilError(t, r.Client.Create(ctx, cluster)) + + rhinoCluster := testCluster() + rhinoCluster.Name = "rhino" + rhinoCluster.Namespace = ns.Name + rhinoCluster.ObjectMeta.UID = "the-uid-456" + assert.NilError(t, r.Client.Create(ctx, rhinoCluster)) + + t.Cleanup(func() { + assert.Check(t, 
r.Client.Delete(ctx, cluster)) + assert.Check(t, r.Client.Delete(ctx, rhinoCluster)) + }) + + t.Run("NoSnapshots", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{} + err := r.deleteSnapshots(ctx, cluster, snapshotList) + assert.NilError(t, err) + }) + + t.Run("NoSnapshotsControlledByHippo", func(t *testing.T) { + pvcName := initialize.String("dedicated-snapshot-volume") + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "first-snapshot", + Namespace: ns.Name, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + *snapshot1, + }, + } + err = r.deleteSnapshots(ctx, cluster, snapshotList) + assert.NilError(t, err) + existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, existingSnapshots, + client.InNamespace(ns.Namespace), + )) + assert.NilError(t, err) + assert.Equal(t, len(existingSnapshots.Items), 1) + }) + + t.Run("OneSnapshotControlledByHippo", func(t *testing.T) { + pvcName := initialize.String("dedicated-snapshot-volume") + snapshot1 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "first-snapshot", + Namespace: ns.Name, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot1) + assert.NilError(t, err) + + snapshot2 := &volumesnapshotv1.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: volumesnapshotv1.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "second-snapshot", + Namespace: ns.Name, + }, + Spec: volumesnapshotv1.VolumeSnapshotSpec{ + Source: volumesnapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: pvcName, + }, + }, + } + err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, err) + err = r.apply(ctx, snapshot2) + assert.NilError(t, err) + + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + *snapshot1, *snapshot2, + }, + } + err = r.deleteSnapshots(ctx, cluster, snapshotList) + assert.NilError(t, err) + existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} + err = errors.WithStack( + r.Client.List(ctx, existingSnapshots, + client.InNamespace(ns.Namespace), + )) + assert.NilError(t, err) + assert.Equal(t, len(existingSnapshots.Items), 1) + assert.Equal(t, existingSnapshots.Items[0].Name, "first-snapshot") + }) +} + +func TestClusterUsingTablespaces(t *testing.T) { + ctx := context.Background() + cluster := testCluster() + + t.Run("NoVolumesFeatureEnabled", func(t *testing.T) { + // Enable Tablespaces feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx := 
feature.NewContext(ctx, gate) + + assert.Assert(t, !clusterUsingTablespaces(ctx, cluster)) + }) + + t.Run("VolumesInPlaceFeatureDisabled", func(t *testing.T) { + cluster.Spec.InstanceSets[0].TablespaceVolumes = []v1beta1.TablespaceVolume{{ + Name: "volume-1", + }} + + assert.Assert(t, !clusterUsingTablespaces(ctx, cluster)) + }) + + t.Run("VolumesInPlaceAndFeatureEnabled", func(t *testing.T) { + // Enable Tablespaces feature gate + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx := feature.NewContext(ctx, gate) + + assert.Assert(t, clusterUsingTablespaces(ctx, cluster)) + }) +} diff --git a/internal/controller/postgrescluster/suite_test.go b/internal/controller/postgrescluster/suite_test.go index d62bd4016a..2a0e3d76ec 100644 --- a/internal/controller/postgrescluster/suite_test.go +++ b/internal/controller/postgrescluster/suite_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -65,7 +54,10 @@ var _ = BeforeSuite(func() { By("bootstrapping test environment") suite.Environment = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "..", "hack", "tools", "external-snapshotter", "client", "config", "crd"), + }, } _, err := suite.Environment.Start() diff --git a/internal/controller/postgrescluster/topology.go b/internal/controller/postgrescluster/topology.go index a1a73d8581..58778be907 100644 --- a/internal/controller/postgrescluster/topology.go +++ b/internal/controller/postgrescluster/topology.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster diff --git a/internal/controller/postgrescluster/topology_test.go b/internal/controller/postgrescluster/topology_test.go index 1fa7640fc2..40c8c0dd7f 100644 --- a/internal/controller/postgrescluster/topology_test.go +++ b/internal/controller/postgrescluster/topology_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -20,6 +9,8 @@ import ( "gotest.tools/v3/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) func TestDefaultTopologySpreadConstraints(t *testing.T) { @@ -31,7 +22,7 @@ func TestDefaultTopologySpreadConstraints(t *testing.T) { }) // Entire selector, hostname, zone, and ScheduleAnyway. - assert.Assert(t, marshalMatches(constraints, ` + assert.Assert(t, cmp.MarshalMatches(constraints, ` - labelSelector: matchExpressions: - key: k1 diff --git a/internal/controller/postgrescluster/util.go b/internal/controller/postgrescluster/util.go index a6f9f12da3..25120ab574 100644 --- a/internal/controller/postgrescluster/util.go +++ b/internal/controller/postgrescluster/util.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -24,7 +13,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/rand" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" @@ -297,22 +285,3 @@ func safeHash32(content func(w io.Writer) error) (string, error) { } return rand.SafeEncodeString(fmt.Sprint(hash.Sum32())), nil } - -// updateReconcileResult creates a new Result based on the new and existing results provided to it. -// This includes setting "Requeue" to true in the Result if set to true in the new Result but not -// in the existing Result, while also updating RequeueAfter if the RequeueAfter value for the new -// result is less than the RequeueAfter value for the existing Result. 
-func updateReconcileResult(currResult, newResult reconcile.Result) reconcile.Result { - - if newResult.Requeue { - currResult.Requeue = true - } - - if newResult.RequeueAfter != 0 { - if currResult.RequeueAfter == 0 || newResult.RequeueAfter < currResult.RequeueAfter { - currResult.RequeueAfter = newResult.RequeueAfter - } - } - - return currResult -} diff --git a/internal/controller/postgrescluster/util_test.go b/internal/controller/postgrescluster/util_test.go index dab383d8a7..51a32f1e85 100644 --- a/internal/controller/postgrescluster/util_test.go +++ b/internal/controller/postgrescluster/util_test.go @@ -1,32 +1,18 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( "errors" - "fmt" "io" "testing" - "time" "gotest.tools/v3/assert" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/cmp" @@ -53,142 +39,6 @@ func TestSafeHash32(t *testing.T) { assert.Equal(t, same, stuff, "expected deterministic hash") } -func TestUpdateReconcileResult(t *testing.T) { - - testCases := []struct { - currResult reconcile.Result - newResult reconcile.Result - requeueExpected bool - expectedRequeueAfter time.Duration - }{{ - currResult: reconcile.Result{}, - newResult: reconcile.Result{}, - requeueExpected: false, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: false}, - newResult: reconcile.Result{Requeue: true}, - requeueExpected: true, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: true}, - newResult: reconcile.Result{Requeue: false}, - requeueExpected: true, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: true}, - newResult: reconcile.Result{Requeue: true}, - requeueExpected: true, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{Requeue: false}, - newResult: reconcile.Result{Requeue: false}, - requeueExpected: false, - expectedRequeueAfter: 0, - }, { - currResult: reconcile.Result{}, - newResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - newResult: reconcile.Result{}, - requeueExpected: false, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 1 * time.Second}, - newResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - newResult: reconcile.Result{RequeueAfter: 1 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: 
reconcile.Result{RequeueAfter: 5 * time.Second}, - newResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - requeueExpected: false, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 1 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: false, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: false, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: false, RequeueAfter: 5 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: false, RequeueAfter: 1 * time.Second, - }, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: false, RequeueAfter: 1 * time.Second, - }, - newResult: reconcile.Result{ - Requeue: false, RequeueAfter: 5 * time.Second, - }, - requeueExpected: false, - expectedRequeueAfter: 1 * time.Second, - }, { - currResult: reconcile.Result{}, - newResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - requeueExpected: true, - expectedRequeueAfter: 5 * time.Second, - }, { - currResult: reconcile.Result{ - Requeue: true, RequeueAfter: 5 * time.Second, - }, - newResult: reconcile.Result{}, - requeueExpected: true, - expectedRequeueAfter: 5 * time.Second, - }} - - for _, tc := range testCases { - t.Run(fmt.Sprintf("curr: %v, new: %v", tc.currResult, tc.newResult), func(t *testing.T) { - result := updateReconcileResult(tc.currResult, tc.newResult) - assert.Assert(t, result.Requeue == tc.requeueExpected) - assert.Assert(t, result.RequeueAfter == tc.expectedRequeueAfter) - }) - } -} - func TestAddDevSHM(t *testing.T) { testCases := []struct { diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index 657f6a2220..e40710d4ff 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -130,6 +119,15 @@ func (r *Reconciler) observePersistentVolumeClaims( resizing.LastTransitionTime = minNotZero( resizing.LastTransitionTime, condition.LastTransitionTime) } + + case + // The "ModifyingVolume" and "ModifyVolumeError" conditions occur + // when the attribute class of a PVC is changing. These attributes + // do not affect the size of a volume, so there's nothing to do. + // See the "VolumeAttributesClass" feature gate. + // - https://git.k8s.io/enhancements/keps/sig-storage/3751-volume-attributes-class + corev1.PersistentVolumeClaimVolumeModifyingVolume, + corev1.PersistentVolumeClaimVolumeModifyVolumeError: } } } @@ -501,10 +499,9 @@ func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, }, } // set the priority class name, if it exists - if len(cluster.Spec.InstanceSets) > 0 && - cluster.Spec.InstanceSets[0].PriorityClassName != nil { + if len(cluster.Spec.InstanceSets) > 0 { jobSpec.Template.Spec.PriorityClassName = - *cluster.Spec.InstanceSets[0].PriorityClassName + initialize.FromPointer(cluster.Spec.InstanceSets[0].PriorityClassName) } moveDirJob.Spec = *jobSpec @@ -619,10 +616,9 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, }, } // set the priority class name, if it exists - if len(cluster.Spec.InstanceSets) > 0 && - cluster.Spec.InstanceSets[0].PriorityClassName != nil { + if len(cluster.Spec.InstanceSets) > 0 { jobSpec.Template.Spec.PriorityClassName = - *cluster.Spec.InstanceSets[0].PriorityClassName + initialize.FromPointer(cluster.Spec.InstanceSets[0].PriorityClassName) } moveDirJob.Spec = *jobSpec @@ -742,9 +738,7 @@ func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context, } // set the priority class name, if it exists if repoHost := cluster.Spec.Backups.PGBackRest.RepoHost; repoHost != nil { - if repoHost.PriorityClassName != nil { - jobSpec.Template.Spec.PriorityClassName = *repoHost.PriorityClassName - } + jobSpec.Template.Spec.PriorityClassName = initialize.FromPointer(repoHost.PriorityClassName) } moveDirJob.Spec = *jobSpec diff --git a/internal/controller/postgrescluster/volumes_test.go b/internal/controller/postgrescluster/volumes_test.go index 11e5974a0e..96eef5f916 100644 --- a/internal/controller/postgrescluster/volumes_test.go +++ b/internal/controller/postgrescluster/volumes_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster @@ -281,7 +270,7 @@ func TestGetPVCNameMethods(t *testing.T) { AccessModes: []corev1.PersistentVolumeAccessMode{ "ReadWriteMany", }, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -406,7 +395,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -422,7 +411,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource. Quantity{ corev1.ResourceStorage: resource. @@ -476,7 +465,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { assert.Assert(t, len(clusterVolumes) == 1) // observe again, but allow time for the change to be observed - err = wait.Poll(time.Second/2, Scale(time.Second*15), func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*15), false, func(ctx context.Context) (bool, error) { clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) return len(clusterVolumes) == 1, err }) @@ -542,7 +531,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { assert.Assert(t, len(clusterVolumes) == 2) // observe again, but allow time for the change to be observed - err = wait.Poll(time.Second/2, Scale(time.Second*15), func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*15), false, func(ctx context.Context) (bool, error) { clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) return len(clusterVolumes) == 2, err }) @@ -610,7 +599,7 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) { assert.Assert(t, len(clusterVolumes) == 3) // observe again, but allow time for the change to be observed - err = wait.Poll(time.Second/2, Scale(time.Second*15), func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, time.Second/2, Scale(time.Second*15), false, func(ctx context.Context) (bool, error) { clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster) return len(clusterVolumes) == 3, err }) @@ -689,7 +678,7 @@ func TestReconcileMoveDirectories(t *testing.T) { DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, @@ -713,7 +702,7 @@ func TestReconcileMoveDirectories(t *testing.T) { VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteMany}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource. Quantity{ corev1.ResourceStorage: resource. 
@@ -800,7 +789,7 @@ volumes: claimName: testpgdata ` - assert.Assert(t, marshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) + assert.Assert(t, cmp.MarshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) } } @@ -860,7 +849,7 @@ volumes: claimName: testwal ` - assert.Assert(t, marshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) + assert.Assert(t, cmp.MarshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) } } @@ -921,7 +910,7 @@ volumes: persistentVolumeClaim: claimName: testrepo ` - assert.Assert(t, marshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) + assert.Assert(t, cmp.MarshalMatches(moveJobs.Items[i].Spec.Template.Spec, compare+"\n")) } } diff --git a/internal/controller/postgrescluster/watches.go b/internal/controller/postgrescluster/watches.go index 44330585ee..0b5ba5fa87 100644 --- a/internal/controller/postgrescluster/watches.go +++ b/internal/controller/postgrescluster/watches.go @@ -1,21 +1,12 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" @@ -29,7 +20,7 @@ import ( // watchPods returns a handler.EventHandler for Pods. func (*Reconciler) watchPods() handler.Funcs { return handler.Funcs{ - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { labels := e.ObjectNew.GetLabels() cluster := labels[naming.LabelCluster] @@ -69,6 +60,17 @@ func (*Reconciler) watchPods() handler.Funcs { }}) return } + + oldAnnotations := e.ObjectOld.GetAnnotations() + newAnnotations := e.ObjectNew.GetAnnotations() + // If the suggested-pgdata-pvc-size annotation is added or changes, reconcile. + if len(cluster) != 0 && oldAnnotations["suggested-pgdata-pvc-size"] != newAnnotations["suggested-pgdata-pvc-size"] { + q.Add(reconcile.Request{NamespacedName: client.ObjectKey{ + Namespace: e.ObjectNew.GetNamespace(), + Name: cluster, + }}) + return + } }, } } diff --git a/internal/controller/postgrescluster/watches_test.go b/internal/controller/postgrescluster/watches_test.go index c29bad700d..fdea498862 100644 --- a/internal/controller/postgrescluster/watches_test.go +++ b/internal/controller/postgrescluster/watches_test.go @@ -1,21 +1,11 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgrescluster import ( + "context" "testing" "gotest.tools/v3/assert" @@ -28,21 +18,22 @@ import ( ) func TestWatchPodsUpdate(t *testing.T) { - queue := controllertest.Queue{Interface: workqueue.New()} + ctx := context.Background() + queue := &controllertest.Queue{Interface: workqueue.New()} reconciler := &Reconciler{} update := reconciler.watchPods().UpdateFunc assert.Assert(t, update != nil) // No metadata; no reconcile. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: &corev1.Pod{}, ObjectNew: &corev1.Pod{}, }, queue) assert.Equal(t, queue.Len(), 0) // Cluster label, but nothing else; no reconcile. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ @@ -61,7 +52,7 @@ func TestWatchPodsUpdate(t *testing.T) { assert.Equal(t, queue.Len(), 0) // Cluster standby leader changed; one reconcile by label. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -108,7 +99,7 @@ func TestWatchPodsUpdate(t *testing.T) { } // Newly pending; one reconcile by label. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: base.DeepCopy(), ObjectNew: pending.DeepCopy(), }, queue) @@ -119,7 +110,7 @@ func TestWatchPodsUpdate(t *testing.T) { queue.Done(item) // Still pending; one reconcile by label. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: pending.DeepCopy(), ObjectNew: pending.DeepCopy(), }, queue) @@ -130,7 +121,7 @@ func TestWatchPodsUpdate(t *testing.T) { queue.Done(item) // No longer pending; one reconcile by label. - update(event.UpdateEvent{ + update(ctx, event.UpdateEvent{ ObjectOld: pending.DeepCopy(), ObjectNew: base.DeepCopy(), }, queue) @@ -140,4 +131,54 @@ func TestWatchPodsUpdate(t *testing.T) { assert.Equal(t, item, expected) queue.Done(item) }) + + // Pod annotation with arbitrary key; no reconcile. + update(ctx, event.UpdateEvent{ + ObjectOld: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "clortho": "vince", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + ObjectNew: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "clortho": "vin", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + }, queue) + assert.Equal(t, queue.Len(), 0) + + // Pod annotation with suggested-pgdata-pvc-size; reconcile. 
+ update(ctx, event.UpdateEvent{ + ObjectOld: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "suggested-pgdata-pvc-size": "5000Mi", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + ObjectNew: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "suggested-pgdata-pvc-size": "8000Mi", + }, + Labels: map[string]string{ + "postgres-operator.crunchydata.com/cluster": "starfish", + }, + }, + }, + }, queue) + assert.Equal(t, queue.Len(), 1) } diff --git a/internal/controller/runtime/client.go b/internal/controller/runtime/client.go index 162565a2f1..4cc05c9835 100644 --- a/internal/controller/runtime/client.go +++ b/internal/controller/runtime/client.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package runtime @@ -23,10 +12,7 @@ import ( // Types that implement single methods of the [client.Reader] interface. type ( - // NOTE: The signature of [client.Client.Get] changes in [sigs.k8s.io/controller-runtime@v0.13.0]. - // - https://github.com/kubernetes-sigs/controller-runtime/releases/tag/v0.13.0 - - ClientGet func(context.Context, client.ObjectKey, client.Object) error + ClientGet func(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error ClientList func(context.Context, client.ObjectList, ...client.ListOption) error ) @@ -73,8 +59,8 @@ func (fn ClientDeleteAll) DeleteAllOf(ctx context.Context, obj client.Object, op return fn(ctx, obj, opts...) } -func (fn ClientGet) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { - return fn(ctx, key, obj) +func (fn ClientGet) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + return fn(ctx, key, obj, opts...) } func (fn ClientList) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { diff --git a/internal/controller/runtime/pod_client.go b/internal/controller/runtime/pod_client.go index 0e649372e2..e842601aa7 100644 --- a/internal/controller/runtime/pod_client.go +++ b/internal/controller/runtime/pod_client.go @@ -1,21 +1,11 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package runtime import ( + "context" "io" corev1 "k8s.io/api/core/v1" @@ -29,14 +19,18 @@ import ( // podExecutor runs command on container in pod in namespace. Non-nil streams // (stdin, stdout, and stderr) are attached to the remote process. type podExecutor func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error func newPodClient(config *rest.Config) (rest.Interface, error) { codecs := serializer.NewCodecFactory(scheme.Scheme) gvk, _ := apiutil.GVKForObject(&corev1.Pod{}, scheme.Scheme) - return apiutil.RESTClientForGVK(gvk, false, config, codecs) + httpClient, err := rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + return apiutil.RESTClientForGVK(gvk, false, config, codecs, httpClient) } // +kubebuilder:rbac:groups="",resources="pods/exec",verbs={create} @@ -45,7 +39,7 @@ func NewPodExecutor(config *rest.Config) (podExecutor, error) { client, err := newPodClient(config) return func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { request := client.Post(). @@ -62,7 +56,7 @@ func NewPodExecutor(config *rest.Config) (podExecutor, error) { exec, err := remotecommand.NewSPDYExecutor(config, "POST", request.URL()) if err == nil { - err = exec.Stream(remotecommand.StreamOptions{ + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ Stdin: stdin, Stdout: stdout, Stderr: stderr, diff --git a/internal/controller/runtime/reconcile.go b/internal/controller/runtime/reconcile.go new file mode 100644 index 0000000000..a2196d1626 --- /dev/null +++ b/internal/controller/runtime/reconcile.go @@ -0,0 +1,69 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ErrorWithBackoff returns a Result and error that indicate a non-nil err +// should be logged and measured and its [reconcile.Request] should be retried +// later. When err is nil, nothing is logged and the Request is not retried. +// When err unwraps to [reconcile.TerminalError], the Request is not retried. +func ErrorWithBackoff(err error) (reconcile.Result, error) { + // Result should be zero to avoid warning messages. + return reconcile.Result{}, err + + // When error is not nil and not a TerminalError, the controller-runtime Controller + // puts [reconcile.Request] back into the workqueue using AddRateLimited. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L317 + // - https://pkg.go.dev/k8s.io/client-go/util/workqueue#RateLimitingInterface +} + +// ErrorWithoutBackoff returns a Result and error that indicate a non-nil err +// should be logged and measured without retrying its [reconcile.Request]. +// When err is nil, nothing is logged and the Request is not retried. +func ErrorWithoutBackoff(err error) (reconcile.Result, error) { + if err != nil { + err = reconcile.TerminalError(err) + } + + // Result should be zero to avoid warning messages. + return reconcile.Result{}, err + + // When error is a TerminalError, the controller-runtime Controller increments + // a counter rather than interacting with the workqueue.
+ // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L314 +} + +// RequeueWithBackoff returns a Result that indicates a [reconcile.Request] +// should be retried later. +func RequeueWithBackoff() reconcile.Result { + return reconcile.Result{Requeue: true} + + // When [reconcile.Result].Requeue is true, the controller-runtime Controller + // puts [reconcile.Request] back into the workqueue using AddRateLimited. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L334 + // - https://pkg.go.dev/k8s.io/client-go/util/workqueue#RateLimitingInterface +} + +// RequeueWithoutBackoff returns a Result that indicates a [reconcile.Request] +// should be retried on or before delay. +func RequeueWithoutBackoff(delay time.Duration) reconcile.Result { + // RequeueAfter must be positive to not backoff. + if delay <= 0 { + delay = time.Nanosecond + } + + // RequeueAfter implies Requeue, but set both to remove any ambiguity. + return reconcile.Result{Requeue: true, RequeueAfter: delay} + + // When [reconcile.Result].RequeueAfter is positive, the controller-runtime Controller + // puts [reconcile.Request] back into the workqueue using AddAfter. + // - https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.4/pkg/internal/controller/controller.go#L325 + // - https://pkg.go.dev/k8s.io/client-go/util/workqueue#DelayingInterface +} diff --git a/internal/controller/runtime/reconcile_test.go b/internal/controller/runtime/reconcile_test.go new file mode 100644 index 0000000000..925b3cf47d --- /dev/null +++ b/internal/controller/runtime/reconcile_test.go @@ -0,0 +1,57 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "errors" + "testing" + "time" + + "gotest.tools/v3/assert" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func TestErrorWithBackoff(t *testing.T) { + result, err := ErrorWithBackoff(nil) + assert.Assert(t, result.IsZero()) + assert.NilError(t, err) + + expected := errors.New("doot") + result, err = ErrorWithBackoff(expected) + assert.Assert(t, result.IsZero()) + assert.Equal(t, err, expected) +} + +func TestErrorWithoutBackoff(t *testing.T) { + result, err := ErrorWithoutBackoff(nil) + assert.Assert(t, result.IsZero()) + assert.NilError(t, err) + + expected := errors.New("doot") + result, err = ErrorWithoutBackoff(expected) + assert.Assert(t, result.IsZero()) + assert.Assert(t, errors.Is(err, reconcile.TerminalError(nil))) + assert.Equal(t, errors.Unwrap(err), expected) +} + +func TestRequeueWithBackoff(t *testing.T) { + result := RequeueWithBackoff() + assert.Assert(t, result.Requeue) + assert.Assert(t, result.RequeueAfter == 0) +} + +func TestRequeueWithoutBackoff(t *testing.T) { + result := RequeueWithoutBackoff(0) + assert.Assert(t, result.Requeue) + assert.Assert(t, result.RequeueAfter > 0) + + result = RequeueWithoutBackoff(-1) + assert.Assert(t, result.Requeue) + assert.Assert(t, result.RequeueAfter > 0) + + result = RequeueWithoutBackoff(time.Minute) + assert.Assert(t, result.Requeue) + assert.Equal(t, result.RequeueAfter, time.Minute) +} diff --git a/internal/controller/runtime/runtime.go b/internal/controller/runtime/runtime.go index 79bb8046da..34bfeabf61 100644 --- a/internal/controller/runtime/runtime.go +++ b/internal/controller/runtime/runtime.go @@ -1,30 +1,31 @@ -/* -Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package runtime import ( - "time" + "context" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" +) + +type ( + CacheConfig = cache.Config + Manager = manager.Manager + Options = manager.Options ) // Scheme associates standard Kubernetes API objects and PGO API objects with Go structs. @@ -37,37 +38,39 @@ func init() { if err := v1beta1.AddToScheme(Scheme); err != nil { panic(err) } + if err := volumesnapshotv1.AddToScheme(Scheme); err != nil { + panic(err) + } } -// default refresh interval in minutes -var refreshInterval = 60 * time.Minute - -// CreateRuntimeManager creates a new controller runtime manager for the PostgreSQL Operator. The -// manager returned is configured specifically for the PostgreSQL Operator, and includes any -// controllers that will be responsible for managing PostgreSQL clusters using the -// 'postgrescluster' custom resource. Additionally, the manager will only watch for resources in -// the namespace specified, with an empty string resulting in the manager watching all namespaces. -func CreateRuntimeManager(namespace string, config *rest.Config, - disableMetrics bool) (manager.Manager, error) { - - options := manager.Options{ - Namespace: namespace, // if empty then watching all namespaces - SyncPeriod: &refreshInterval, - Scheme: Scheme, +// GetConfig returns a Kubernetes client configuration from KUBECONFIG or the +// service account Kubernetes gives to pods. +func GetConfig() (*rest.Config, error) { return config.GetConfig() } + +// NewManager returns a Manager that interacts with the Kubernetes API of config. +// When config is nil, it reads from KUBECONFIG or the local service account. +// When options.Scheme is nil, it uses the Scheme from this package. 
+func NewManager(config *rest.Config, options manager.Options) (manager.Manager, error) { + var m manager.Manager + var err error + + if config == nil { + config, err = GetConfig() } - if disableMetrics { - options.HealthProbeBindAddress = "0" - options.MetricsBindAddress = "0" + + if options.Scheme == nil { + options.Scheme = Scheme } - // create controller runtime manager - mgr, err := manager.New(config, options) - if err != nil { - return nil, err + if err == nil { + m, err = manager.New(config, options) } - return mgr, nil + return m, err } -// GetConfig creates a *rest.Config for talking to a Kubernetes API server. -func GetConfig() (*rest.Config, error) { return config.GetConfig() } +// SetLogger assigns the default Logger used by [sigs.k8s.io/controller-runtime]. +func SetLogger(logger logging.Logger) { log.SetLogger(logger) } + +// SignalHandler returns a Context that is canceled on SIGINT or SIGTERM. +func SignalHandler() context.Context { return signals.SetupSignalHandler() } diff --git a/internal/controller/runtime/ticker.go b/internal/controller/runtime/ticker.go index aaeb0ef26c..830179eafc 100644 --- a/internal/controller/runtime/ticker.go +++ b/internal/controller/runtime/ticker.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package runtime @@ -22,24 +11,26 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" ) type ticker struct { time.Duration event.GenericEvent + Handler handler.EventHandler Immediate bool } // NewTicker returns a Source that emits e every d. -func NewTicker(d time.Duration, e event.GenericEvent) source.Source { - return &ticker{Duration: d, GenericEvent: e} +func NewTicker(d time.Duration, e event.GenericEvent, + h handler.EventHandler) source.Source { + return &ticker{Duration: d, GenericEvent: e, Handler: h} } // NewTickerImmediate returns a Source that emits e at start and every d. -func NewTickerImmediate(d time.Duration, e event.GenericEvent) source.Source { - return &ticker{Duration: d, GenericEvent: e, Immediate: true} +func NewTickerImmediate(d time.Duration, e event.GenericEvent, + h handler.EventHandler) source.Source { + return &ticker{Duration: d, GenericEvent: e, Handler: h, Immediate: true} } func (t ticker) String() string { return "every " + t.Duration.String() } @@ -47,20 +38,14 @@ func (t ticker) String() string { return "every " + t.Duration.String() } // Start is called by controller-runtime Controller and returns quickly. // It cleans up when ctx is cancelled. 
func (t ticker) Start( - ctx context.Context, h handler.EventHandler, - q workqueue.RateLimitingInterface, p ...predicate.Predicate, + ctx context.Context, q workqueue.RateLimitingInterface, ) error { ticker := time.NewTicker(t.Duration) // Pass t.GenericEvent to h when it is not filtered out by p. // - https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/source/internal#EventHandler emit := func() { - for _, pp := range p { - if !pp.Generic(t.GenericEvent) { - return - } - } - h.Generic(t.GenericEvent, q) + t.Handler.Generic(ctx, t.GenericEvent, q) } if t.Immediate { diff --git a/internal/controller/runtime/ticker_test.go b/internal/controller/runtime/ticker_test.go index ef52af9a33..49cecd79d7 100644 --- a/internal/controller/runtime/ticker_test.go +++ b/internal/controller/runtime/ticker_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package runtime @@ -25,7 +14,6 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" ) func TestTickerString(t *testing.T) { @@ -41,21 +29,21 @@ func TestTicker(t *testing.T) { expected := event.GenericEvent{Object: new(corev1.ConfigMap)} tq := workqueue.NewRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter()) - th := handler.Funcs{GenericFunc: func(e event.GenericEvent, q workqueue.RateLimitingInterface) { + th := handler.Funcs{GenericFunc: func(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { called = append(called, e) assert.Equal(t, q, tq, "should be called with the queue passed in Start") }} - t.Run("WithoutPredicates", func(t *testing.T) { + t.Run("NotImmediate", func(t *testing.T) { called = nil - ticker := NewTicker(100*time.Millisecond, expected) + ticker := NewTicker(100*time.Millisecond, expected, th) ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) t.Cleanup(cancel) // Start the ticker and wait for the deadline to pass. - assert.NilError(t, ticker.Start(ctx, th, tq)) + assert.NilError(t, ticker.Start(ctx, tq)) <-ctx.Done() assert.Equal(t, len(called), 2) @@ -63,36 +51,15 @@ func TestTicker(t *testing.T) { assert.Equal(t, called[1], expected, "expected at 200ms") }) - t.Run("WithPredicates", func(t *testing.T) { - called = nil - - // Predicates that exclude events after a fixed number have passed. - pLength := predicate.Funcs{GenericFunc: func(event.GenericEvent) bool { return len(called) < 3 }} - pTrue := predicate.Funcs{GenericFunc: func(event.GenericEvent) bool { return true }} - - ticker := NewTicker(50*time.Millisecond, expected) - ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) - t.Cleanup(cancel) - - // Start the ticker and wait for the deadline to pass. 
- assert.NilError(t, ticker.Start(ctx, th, tq, pTrue, pLength)) - <-ctx.Done() - - assert.Equal(t, len(called), 3) - assert.Equal(t, called[0], expected) - assert.Equal(t, called[1], expected) - assert.Equal(t, called[2], expected) - }) - t.Run("Immediate", func(t *testing.T) { called = nil - ticker := NewTickerImmediate(100*time.Millisecond, expected) + ticker := NewTickerImmediate(100*time.Millisecond, expected, th) ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) t.Cleanup(cancel) // Start the ticker and wait for the deadline to pass. - assert.NilError(t, ticker.Start(ctx, th, tq)) + assert.NilError(t, ticker.Start(ctx, tq)) <-ctx.Done() assert.Assert(t, len(called) > 2) diff --git a/internal/controller/standalone_pgadmin/apply.go b/internal/controller/standalone_pgadmin/apply.go index cad148c768..0eaa613df8 100644 --- a/internal/controller/standalone_pgadmin/apply.go +++ b/internal/controller/standalone_pgadmin/apply.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/config.go b/internal/controller/standalone_pgadmin/config.go index a842a296ab..ddd080985b 100644 --- a/internal/controller/standalone_pgadmin/config.go +++ b/internal/controller/standalone_pgadmin/config.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/configmap.go b/internal/controller/standalone_pgadmin/configmap.go index a76cb06bf7..d1ec39bf13 100644 --- a/internal/controller/standalone_pgadmin/configmap.go +++ b/internal/controller/standalone_pgadmin/configmap.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -63,7 +53,7 @@ func configmap(pgadmin *v1beta1.PGAdmin, naming.StandalonePGAdminLabels(pgadmin.Name)) // TODO(tjmoore4): Populate configuration details. - initialize.StringMap(&configmap.Data) + initialize.Map(&configmap.Data) configSettings, err := generateConfig(pgadmin) if err == nil { configmap.Data[settingsConfigMapKey] = configSettings diff --git a/internal/controller/standalone_pgadmin/configmap_test.go b/internal/controller/standalone_pgadmin/configmap_test.go index c5f22e53cb..5a844e520c 100644 --- a/internal/controller/standalone_pgadmin/configmap_test.go +++ b/internal/controller/standalone_pgadmin/configmap_test.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go index 77e89ea02c..81d5fc2d40 100644 --- a/internal/controller/standalone_pgadmin/controller.go +++ b/internal/controller/standalone_pgadmin/controller.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -26,7 +16,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/source" controllerruntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" @@ -38,13 +27,14 @@ type PGAdminReconciler struct { client.Client Owner client.FieldOwner PodExec func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error Recorder record.EventRecorder IsOpenShift bool } +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list,watch} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} //+kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={list,watch} //+kubebuilder:rbac:groups="",resources="secrets",verbs={list,watch} @@ -71,11 +61,11 @@ func (r *PGAdminReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&appsv1.StatefulSet{}). Owns(&corev1.Service{}). Watches( - &source.Kind{Type: v1beta1.NewPostgresCluster()}, + v1beta1.NewPostgresCluster(), r.watchPostgresClusters(), ). Watches( - &source.Kind{Type: &corev1.Secret{}}, + &corev1.Secret{}, r.watchForRelatedSecret(), ). Complete(r) diff --git a/internal/controller/standalone_pgadmin/controller_test.go b/internal/controller/standalone_pgadmin/controller_test.go index c31ff59cd2..b0fe17cbe6 100644 --- a/internal/controller/standalone_pgadmin/controller_test.go +++ b/internal/controller/standalone_pgadmin/controller_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/helpers_test.go b/internal/controller/standalone_pgadmin/helpers_test.go index 1f099a2b53..9096edb5a1 100644 --- a/internal/controller/standalone_pgadmin/helpers_test.go +++ b/internal/controller/standalone_pgadmin/helpers_test.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/helpers_unit_test.go b/internal/controller/standalone_pgadmin/helpers_unit_test.go index c304702e3a..63887385fc 100644 --- a/internal/controller/standalone_pgadmin/helpers_unit_test.go +++ b/internal/controller/standalone_pgadmin/helpers_unit_test.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -77,7 +67,7 @@ func testVolumeClaimSpec() corev1.PersistentVolumeClaimSpec { // Defines a volume claim spec that can be used to create instances return corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceStorage: resource.MustParse("1Gi"), }, diff --git a/internal/controller/standalone_pgadmin/pod.go b/internal/controller/standalone_pgadmin/pod.go index b42ba283c5..bbb39b9322 100644 --- a/internal/controller/standalone_pgadmin/pod.go +++ b/internal/controller/standalone_pgadmin/pod.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -20,6 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/intstr" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/initialize" @@ -124,9 +115,18 @@ func pod( Name: "PGADMIN_SETUP_EMAIL", Value: fmt.Sprintf("admin@%s.%s.svc", inPGAdmin.Name, inPGAdmin.Namespace), }, + // Setting the KRB5_CONFIG for kerberos + // - https://web.mit.edu/kerberos/krb5-current/doc/admin/conf_files/krb5_conf.html { - Name: "PGADMIN_LISTEN_PORT", - Value: fmt.Sprintf("%d", pgAdminPort), + Name: "KRB5_CONFIG", + Value: configMountPath + "/krb5.conf", + }, + // In testing it was determined that we need to set this env var for the replay cache + // otherwise it defaults to the read-only location `/var/tmp/` + // - https://web.mit.edu/kerberos/krb5-current/doc/basic/rcache_def.html#replay-cache-types + { + Name: "KRB5RCACHEDIR", + Value: "/tmp", }, }, VolumeMounts: []corev1.VolumeMount{ @@ -154,6 +154,26 @@ func pod( }, }, } + + // Creating a readiness probe that will check that the pgAdmin `/login` + // endpoint is reachable at the specified port + readinessProbe := &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt32(pgAdminPort), + Path: "/login", + Scheme: corev1.URISchemeHTTP, + }, + }, + } + gunicornData := inConfigMap.Data[gunicornConfigKey] + // Check the configmap to see if we think TLS is enabled + // If so, update the readiness check scheme to HTTPS + if strings.Contains(gunicornData, "certfile") && strings.Contains(gunicornData, "keyfile") { + readinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS + } + container.ReadinessProbe = readinessProbe + startup := corev1.Container{ Name: naming.ContainerPGAdminStartup, Command: startupCommand(), @@ -325,16 +345,16 @@ loadServerCommand // descriptor gets closed and reopened to use the builtin `[ -nt` to check mtimes. // - https://unix.stackexchange.com/a/407383 var reloadScript = ` -exec {fd}<> <(:) -while read -r -t 5 -u "${fd}" || true; do - if [ "${cluster_file}" -nt "/proc/self/fd/${fd}" ] && loadServerCommand +exec {fd}<> <(:||:) +while read -r -t 5 -u "${fd}" ||:; do + if [[ "${cluster_file}" -nt "/proc/self/fd/${fd}" ]] && loadServerCommand then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded shared servers dated %y' "${cluster_file}" fi - if [ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ] + if [[ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ]] then - if [ $APP_RELEASE -eq 7 ]; then + if [[ $APP_RELEASE -eq 7 ]]; then ` + startCommandV7 + ` else ` + startCommandV8 + ` @@ -411,12 +431,11 @@ with open('` + configMountPath + `/` + gunicornConfigFilePath + `') as _f: script := strings.Join([]string{ // Use the initContainer to create this path to avoid the error noted here: - // - https://github.com/kubernetes/kubernetes/issues/121294 - `mkdir -p /etc/pgadmin/conf.d`, - // Write the system configuration into a read-only file. - `(umask a-w && echo "$1" > ` + scriptMountPath + `/config_system.py` + `)`, - // Write the server configuration into a read-only file. - `(umask a-w && echo "$2" > ` + scriptMountPath + `/gunicorn_config.py` + `)`, + // - https://issue.k8s.io/121294 + `mkdir -p ` + configMountPath, + // Write the system and server configurations. 
+ `echo "$1" > ` + scriptMountPath + `/config_system.py`, + `echo "$2" > ` + scriptMountPath + `/gunicorn_config.py`, }, "\n") return append([]string{"bash", "-ceu", "--", script, "startup"}, args...) diff --git a/internal/controller/standalone_pgadmin/pod_test.go b/internal/controller/standalone_pgadmin/pod_test.go index af5b9e0bea..19cee52882 100644 --- a/internal/controller/standalone_pgadmin/pod_test.go +++ b/internal/controller/standalone_pgadmin/pod_test.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -80,16 +70,16 @@ containers: } loadServerCommand - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${cluster_file}" -nt "/proc/self/fd/${fd}" ] && loadServerCommand + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${cluster_file}" -nt "/proc/self/fd/${fd}" ]] && loadServerCommand then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded shared servers dated %y' "${cluster_file}" fi - if [ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ] + if [[ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ]] then - if [ $APP_RELEASE -eq 7 ]; then + if [[ $APP_RELEASE -eq 7 ]]; then pgadmin4 & else gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & @@ -104,13 +94,20 @@ containers: env: - name: PGADMIN_SETUP_EMAIL value: admin@pgadmin.postgres-operator.svc - - name: PGADMIN_LISTEN_PORT - value: "5050" + - name: KRB5_CONFIG + value: /etc/pgadmin/conf.d/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp name: pgadmin ports: - containerPort: 5050 name: pgadmin protocol: TCP + readinessProbe: + httpGet: + path: /login + port: 5050 + scheme: HTTP resources: {} securityContext: allowPrivilegeEscalation: false @@ -142,8 +139,8 @@ initContainers: - -- - |- mkdir -p /etc/pgadmin/conf.d - (umask a-w && echo "$1" > /etc/pgadmin/config_system.py) - (umask a-w && echo "$2" > /etc/pgadmin/gunicorn_config.py) + echo "$1" > /etc/pgadmin/config_system.py + echo "$2" > /etc/pgadmin/gunicorn_config.py - startup - | import glob, json, re, os @@ -258,16 +255,16 @@ containers: } loadServerCommand - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${cluster_file}" -nt "/proc/self/fd/${fd}" ] && loadServerCommand + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${cluster_file}" -nt "/proc/self/fd/${fd}" ]] && loadServerCommand then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded shared servers dated %y' "${cluster_file}" fi - if [ ! -d /proc/$(cat $PGADMIN4_PIDFILE) ] + if [[ ! 
-d /proc/$(cat $PGADMIN4_PIDFILE) ]] then - if [ $APP_RELEASE -eq 7 ]; then + if [[ $APP_RELEASE -eq 7 ]]; then pgadmin4 & else gunicorn -c /etc/pgadmin/gunicorn_config.py --chdir $PGADMIN_DIR pgAdmin4:app & @@ -282,8 +279,10 @@ containers: env: - name: PGADMIN_SETUP_EMAIL value: admin@pgadmin.postgres-operator.svc - - name: PGADMIN_LISTEN_PORT - value: "5050" + - name: KRB5_CONFIG + value: /etc/pgadmin/conf.d/krb5.conf + - name: KRB5RCACHEDIR + value: /tmp image: new-image imagePullPolicy: Always name: pgadmin @@ -291,6 +290,11 @@ containers: - containerPort: 5050 name: pgadmin protocol: TCP + readinessProbe: + httpGet: + path: /login + port: 5050 + scheme: HTTP resources: requests: cpu: 100m @@ -324,8 +328,8 @@ initContainers: - -- - |- mkdir -p /etc/pgadmin/conf.d - (umask a-w && echo "$1" > /etc/pgadmin/config_system.py) - (umask a-w && echo "$2" > /etc/pgadmin/gunicorn_config.py) + echo "$1" > /etc/pgadmin/config_system.py + echo "$2" > /etc/pgadmin/gunicorn_config.py - startup - | import glob, json, re, os diff --git a/internal/controller/standalone_pgadmin/postgrescluster.go b/internal/controller/standalone_pgadmin/postgrescluster.go index 5ad48e915b..5327b8ae70 100644 --- a/internal/controller/standalone_pgadmin/postgrescluster.go +++ b/internal/controller/standalone_pgadmin/postgrescluster.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/service.go b/internal/controller/standalone_pgadmin/service.go index 7d96234f15..2453a6a1fa 100644 --- a/internal/controller/standalone_pgadmin/service.go +++ b/internal/controller/standalone_pgadmin/service.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/service_test.go b/internal/controller/standalone_pgadmin/service_test.go index 0db7ce3bbb..24b20c8247 100644 --- a/internal/controller/standalone_pgadmin/service_test.go +++ b/internal/controller/standalone_pgadmin/service_test.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go index 68a886efa1..e086e333f4 100644 --- a/internal/controller/standalone_pgadmin/statefulset.go +++ b/internal/controller/standalone_pgadmin/statefulset.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -104,10 +94,7 @@ func statefulset( // Use scheduling constraints from the cluster spec. sts.Spec.Template.Spec.Affinity = pgadmin.Spec.Affinity sts.Spec.Template.Spec.Tolerations = pgadmin.Spec.Tolerations - - if pgadmin.Spec.PriorityClassName != nil { - sts.Spec.Template.Spec.PriorityClassName = *pgadmin.Spec.PriorityClassName - } + sts.Spec.Template.Spec.PriorityClassName = initialize.FromPointer(pgadmin.Spec.PriorityClassName) // Restart containers any time they stop, die, are killed, etc. // - https://docs.k8s.io/concepts/workloads/pods/pod-lifecycle/#restart-policy diff --git a/internal/controller/standalone_pgadmin/statefulset_test.go b/internal/controller/standalone_pgadmin/statefulset_test.go index dea5b983b4..52c501b357 100644 --- a/internal/controller/standalone_pgadmin/statefulset_test.go +++ b/internal/controller/standalone_pgadmin/statefulset_test.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go index 12cac3f7d7..3c9a3ce05b 100644 --- a/internal/controller/standalone_pgadmin/users.go +++ b/internal/controller/standalone_pgadmin/users.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -80,9 +70,9 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return r.PodExec(pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) + return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) } } if podExecutor == nil { diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go index 1a864c546d..409fcea701 100644 --- a/internal/controller/standalone_pgadmin/users_test.go +++ b/internal/controller/standalone_pgadmin/users_test.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -76,6 +66,10 @@ func TestReconcilePGAdminUsers(t *testing.T) { t.Run("PodTerminating", func(t *testing.T) { pod := pod.DeepCopy() + // Must add finalizer when adding deletion timestamp otherwise fake client will panic: + // https://github.com/kubernetes-sigs/controller-runtime/pull/2316 + pod.Finalizers = append(pod.Finalizers, "some-finalizer") + pod.DeletionTimestamp = new(metav1.Time) *pod.DeletionTimestamp = metav1.Now() pod.Status.ContainerStatuses = @@ -107,7 +101,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { calls := 0 r.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -146,7 +140,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { calls := 0 r.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -172,14 +166,14 @@ func TestReconcilePGAdminMajorVersion(t *testing.T) { reconciler := &PGAdminReconciler{} podExecutor := func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return reconciler.PodExec(pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) 
+ return reconciler.PodExec(ctx, pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) } t.Run("SuccessfulRetrieval", func(t *testing.T) { reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { assert.Equal(t, pod, "pgadmin-123-0") @@ -199,7 +193,7 @@ func TestReconcilePGAdminMajorVersion(t *testing.T) { t.Run("FailedRetrieval", func(t *testing.T) { reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { // Simulate the python call giving bad data (not a version int) @@ -214,7 +208,7 @@ func TestReconcilePGAdminMajorVersion(t *testing.T) { t.Run("PodExecError", func(t *testing.T) { reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { return errors.New("PodExecError") @@ -277,9 +271,9 @@ func TestWritePGAdminUsers(t *testing.T) { pod.Name = fmt.Sprintf("pgadmin-%s-0", pgadmin.UID) podExecutor := func( - _ context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, + ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { - return reconciler.PodExec(pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) + return reconciler.PodExec(ctx, pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) } t.Run("CreateOneUser", func(t *testing.T) { @@ -298,7 +292,7 @@ func TestWritePGAdminUsers(t *testing.T) { calls := 0 reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -356,7 +350,7 @@ func TestWritePGAdminUsers(t *testing.T) { addUserCalls := 0 updateUserCalls := 0 reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -428,7 +422,7 @@ func TestWritePGAdminUsers(t *testing.T) { addUserCalls := 0 updateUserCalls := 0 reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -481,7 +475,7 @@ func TestWritePGAdminUsers(t *testing.T) { } calls := 0 reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -522,7 +516,7 @@ func TestWritePGAdminUsers(t *testing.T) { // PodExec error calls := 0 reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -548,7 +542,7 @@ func TestWritePGAdminUsers(t *testing.T) { // setup.py error in stderr reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -601,7 +595,7 @@ func TestWritePGAdminUsers(t *testing.T) { // PodExec error calls := 0 reconciler.PodExec = func( - namespace, pod, container 
string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -628,7 +622,7 @@ func TestWritePGAdminUsers(t *testing.T) { // setup.py error in stderr reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -656,7 +650,7 @@ func TestWritePGAdminUsers(t *testing.T) { // setup.py error in stdout regarding email address reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ @@ -685,7 +679,7 @@ func TestWritePGAdminUsers(t *testing.T) { // setup.py error in stdout regarding password reconciler.PodExec = func( - namespace, pod, container string, + ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { calls++ diff --git a/internal/controller/standalone_pgadmin/volume.go b/internal/controller/standalone_pgadmin/volume.go index dd488b6c62..7615f6142b 100644 --- a/internal/controller/standalone_pgadmin/volume.go +++ b/internal/controller/standalone_pgadmin/volume.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/controller/standalone_pgadmin/volume_test.go b/internal/controller/standalone_pgadmin/volume_test.go index 41fd67f37e..645c228277 100644 --- a/internal/controller/standalone_pgadmin/volume_test.go +++ b/internal/controller/standalone_pgadmin/volume_test.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -56,7 +46,7 @@ func TestReconcilePGAdminDataVolume(t *testing.T) { Spec: v1beta1.PGAdminSpec{ DataVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{ corev1.ResourceStorage: resource.MustParse("1Gi")}}, StorageClassName: initialize.String("storage-class-for-data"), diff --git a/internal/controller/standalone_pgadmin/watches.go b/internal/controller/standalone_pgadmin/watches.go index 38723c0423..49ac1ebd29 100644 --- a/internal/controller/standalone_pgadmin/watches.go +++ b/internal/controller/standalone_pgadmin/watches.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin @@ -29,8 +18,7 @@ import ( // watchPostgresClusters returns a [handler.EventHandler] for PostgresClusters. func (r *PGAdminReconciler) watchPostgresClusters() handler.Funcs { - handle := func(cluster client.Object, q workqueue.RateLimitingInterface) { - ctx := context.Background() + handle := func(ctx context.Context, cluster client.Object, q workqueue.RateLimitingInterface) { for _, pgadmin := range r.findPGAdminsForPostgresCluster(ctx, cluster) { q.Add(ctrl.Request{ @@ -40,14 +28,14 @@ func (r *PGAdminReconciler) watchPostgresClusters() handler.Funcs { } return handler.Funcs{ - CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(e.ObjectNew, q) + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) }, - DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, } } @@ -55,8 +43,7 @@ func (r *PGAdminReconciler) watchPostgresClusters() handler.Funcs { // watchForRelatedSecret handles create/update/delete events for secrets, // passing the Secret ObjectKey to findPGAdminsForSecret func (r *PGAdminReconciler) watchForRelatedSecret() handler.EventHandler { - handle := func(secret client.Object, q workqueue.RateLimitingInterface) { - ctx := context.Background() + handle := func(ctx context.Context, secret client.Object, q workqueue.RateLimitingInterface) { key := client.ObjectKeyFromObject(secret) for _, pgadmin := range r.findPGAdminsForSecret(ctx, key) { @@ -67,11 +54,11 @@ func (r *PGAdminReconciler) watchForRelatedSecret() handler.EventHandler { } return handler.Funcs{ - 
CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(e.ObjectNew, q) + UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.ObjectNew, q) }, // If the secret is deleted, we want to reconcile // in order to emit an event/status about this problem. @@ -79,8 +66,8 @@ func (r *PGAdminReconciler) watchForRelatedSecret() handler.EventHandler { // when we reconcile the cluster and can't find the secret. // That way, users will get two alerts: one when the secret is deleted // and another when the cluster is being reconciled. - DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(e.Object, q) + DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + handle(ctx, e.Object, q) }, } } diff --git a/internal/controller/standalone_pgadmin/watches_test.go b/internal/controller/standalone_pgadmin/watches_test.go index 0afc097a7f..1419eb9efa 100644 --- a/internal/controller/standalone_pgadmin/watches_test.go +++ b/internal/controller/standalone_pgadmin/watches_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package standalone_pgadmin diff --git a/internal/feature/features.go b/internal/feature/features.go new file mode 100644 index 0000000000..db424ead42 --- /dev/null +++ b/internal/feature/features.go @@ -0,0 +1,132 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Package feature provides types and functions to enable and disable features +of the Postgres Operator. + +To add a new feature, export its name as a constant string and configure it +in [NewGate]. Choose a name that is clear to end users, as they will use it +to enable or disable the feature. + +# Stages + +Each feature must be configured with a maturity called a stage. We follow the +Kubernetes convention that features in the "Alpha" stage are disabled by default, +while those in the "Beta" stage are enabled by default. + - https://docs.k8s.io/reference/command-line-tools-reference/feature-gates/#feature-stages + +NOTE: Since Kubernetes 1.24, APIs (not features) in the "Beta" stage are disabled by default: + - https://blog.k8s.io/2022/05/03/kubernetes-1-24-release-announcement/#beta-apis-off-by-default + - https://git.k8s.io/enhancements/keps/sig-architecture/3136-beta-apis-off-by-default#goals + +# Using Features + +We initialize and configure one [MutableGate] in main() and add it to the Context +passed to Reconcilers and other Runnables. 
Those can then interrogate it using [Enabled]: + + if !feature.Enabled(ctx, feature.Excellent) { return } + +Tests should create and configure their own [MutableGate] and inject it using +[NewContext]. For example, the following enables one feature and disables another: + + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.Excellent: true, + feature.Uncommon: false, + })) + ctx := feature.NewContext(context.Background(), gate) +*/ +package feature + +import ( + "context" + + "k8s.io/component-base/featuregate" +) + +type Feature = featuregate.Feature + +// Gate indicates what features exist and which are enabled. +type Gate interface { + Enabled(Feature) bool + String() string +} + +// MutableGate contains features that can be enabled or disabled. +type MutableGate interface { + Gate + // Set enables or disables features by parsing a string like "feature1=true,feature2=false". + Set(string) error + // SetFromMap enables or disables features by boolean values. + SetFromMap(map[string]bool) error +} + +const ( + // Support appending custom queries to default PGMonitor queries + AppendCustomQueries = "AppendCustomQueries" + + // Enables automatic creation of user schema + AutoCreateUserSchema = "AutoCreateUserSchema" + + // Support automatically growing volumes + AutoGrowVolumes = "AutoGrowVolumes" + + BridgeIdentifiers = "BridgeIdentifiers" + + // Support custom sidecars for PostgreSQL instance Pods + InstanceSidecars = "InstanceSidecars" + + // Support custom sidecars for pgBouncer Pods + PGBouncerSidecars = "PGBouncerSidecars" + + // Support tablespace volumes + TablespaceVolumes = "TablespaceVolumes" + + // Support VolumeSnapshots + VolumeSnapshots = "VolumeSnapshots" +) + +// NewGate returns a MutableGate with the Features defined in this package. +func NewGate() MutableGate { + gate := featuregate.NewFeatureGate() + + if err := gate.Add(map[Feature]featuregate.FeatureSpec{ + AppendCustomQueries: {Default: false, PreRelease: featuregate.Alpha}, + AutoCreateUserSchema: {Default: true, PreRelease: featuregate.Beta}, + AutoGrowVolumes: {Default: false, PreRelease: featuregate.Alpha}, + BridgeIdentifiers: {Default: false, PreRelease: featuregate.Alpha}, + InstanceSidecars: {Default: false, PreRelease: featuregate.Alpha}, + PGBouncerSidecars: {Default: false, PreRelease: featuregate.Alpha}, + TablespaceVolumes: {Default: false, PreRelease: featuregate.Alpha}, + VolumeSnapshots: {Default: false, PreRelease: featuregate.Alpha}, + }); err != nil { + panic(err) + } + + return gate +} + +type contextKey struct{} + +// Enabled indicates if a Feature is enabled in the Gate contained in ctx. It +// returns false when there is no Gate. +func Enabled(ctx context.Context, f Feature) bool { + gate, ok := ctx.Value(contextKey{}).(Gate) + return ok && gate.Enabled(f) +} + +// NewContext returns a copy of ctx containing gate. Check it using [Enabled]. +func NewContext(ctx context.Context, gate Gate) context.Context { + return context.WithValue(ctx, contextKey{}, gate) +} + +func ShowGates(ctx context.Context) string { + featuresEnabled := "" + gate, ok := ctx.Value(contextKey{}).(Gate) + if ok { + featuresEnabled = gate.String() + } + return featuresEnabled +} diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go new file mode 100644 index 0000000000..f76dd216e6 --- /dev/null +++ b/internal/feature/features_test.go @@ -0,0 +1,65 @@ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package feature + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" +) + +func TestDefaults(t *testing.T) { + t.Parallel() + gate := NewGate() + + assert.Assert(t, false == gate.Enabled(AppendCustomQueries)) + assert.Assert(t, true == gate.Enabled(AutoCreateUserSchema)) + assert.Assert(t, false == gate.Enabled(AutoGrowVolumes)) + assert.Assert(t, false == gate.Enabled(BridgeIdentifiers)) + assert.Assert(t, false == gate.Enabled(InstanceSidecars)) + assert.Assert(t, false == gate.Enabled(PGBouncerSidecars)) + assert.Assert(t, false == gate.Enabled(TablespaceVolumes)) + assert.Assert(t, false == gate.Enabled(VolumeSnapshots)) + + assert.Equal(t, gate.String(), "") +} + +func TestStringFormat(t *testing.T) { + t.Parallel() + gate := NewGate() + + assert.NilError(t, gate.Set("")) + assert.NilError(t, gate.Set("TablespaceVolumes=true")) + assert.Equal(t, gate.String(), "TablespaceVolumes=true") + assert.Assert(t, true == gate.Enabled(TablespaceVolumes)) + + err := gate.Set("NotAGate=true") + assert.ErrorContains(t, err, "unrecognized feature gate") + assert.ErrorContains(t, err, "NotAGate") + + err = gate.Set("GateNotSet") + assert.ErrorContains(t, err, "missing bool") + assert.ErrorContains(t, err, "GateNotSet") + + err = gate.Set("GateNotSet=foo") + assert.ErrorContains(t, err, "invalid value") + assert.ErrorContains(t, err, "GateNotSet") +} + +func TestContext(t *testing.T) { + t.Parallel() + gate := NewGate() + ctx := NewContext(context.Background(), gate) + assert.Equal(t, ShowGates(ctx), "") + + assert.NilError(t, gate.Set("TablespaceVolumes=true")) + assert.Assert(t, true == Enabled(ctx, TablespaceVolumes)) + assert.Equal(t, ShowGates(ctx), "TablespaceVolumes=true") + + assert.NilError(t, gate.SetFromMap(map[string]bool{TablespaceVolumes: false})) + assert.Assert(t, false == Enabled(ctx, TablespaceVolumes)) + assert.Equal(t, ShowGates(ctx), "TablespaceVolumes=false") +} diff --git a/internal/initialize/doc.go b/internal/initialize/doc.go index 34e34e5cb9..aedd85846f 100644 --- a/internal/initialize/doc.go +++ b/internal/initialize/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package initialize provides functions to initialize some common fields and types. package initialize diff --git a/internal/initialize/intstr.go b/internal/initialize/intstr.go deleted file mode 100644 index d6efe71885..0000000000 --- a/internal/initialize/intstr.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package initialize - -import ( - "k8s.io/apimachinery/pkg/util/intstr" -) - -// IntOrStringInt32 returns an *intstr.IntOrString containing i. -func IntOrStringInt32(i int32) *intstr.IntOrString { - return IntOrString(intstr.FromInt(int(i))) -} - -// IntOrStringString returns an *intstr.IntOrString containing s. -func IntOrStringString(s string) *intstr.IntOrString { - return IntOrString(intstr.FromString(s)) -} - -// IntOrString returns a pointer to the provided IntOrString -func IntOrString(ios intstr.IntOrString) *intstr.IntOrString { - return &ios -} diff --git a/internal/initialize/intstr_test.go b/internal/initialize/intstr_test.go deleted file mode 100644 index 388c3795b2..0000000000 --- a/internal/initialize/intstr_test.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package initialize_test - -import ( - "testing" - - "gotest.tools/v3/assert" - "k8s.io/apimachinery/pkg/util/intstr" - - "github.com/crunchydata/postgres-operator/internal/initialize" -) - -func TestIntOrStringInt32(t *testing.T) { - // Same content as the upstream constructor. - upstream := intstr.FromInt(42) - n := initialize.IntOrStringInt32(42) - - assert.DeepEqual(t, &upstream, n) -} - -func TestIntOrStringString(t *testing.T) { - upstream := intstr.FromString("50%") - s := initialize.IntOrStringString("50%") - - assert.DeepEqual(t, &upstream, s) -} -func TestIntOrString(t *testing.T) { - upstream := intstr.FromInt(0) - - ios := initialize.IntOrString(intstr.FromInt(0)) - assert.DeepEqual(t, *ios, upstream) -} diff --git a/internal/initialize/metadata.go b/internal/initialize/metadata.go index f27d4c6751..d62530736a 100644 --- a/internal/initialize/metadata.go +++ b/internal/initialize/metadata.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package initialize diff --git a/internal/initialize/metadata_test.go b/internal/initialize/metadata_test.go index 280b73abde..735e455a2e 100644 --- a/internal/initialize/metadata_test.go +++ b/internal/initialize/metadata_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize_test diff --git a/internal/initialize/primitives.go b/internal/initialize/primitives.go index e3954ba436..9bc264f88c 100644 --- a/internal/initialize/primitives.go +++ b/internal/initialize/primitives.go @@ -1,30 +1,12 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize // Bool returns a pointer to v. func Bool(v bool) *bool { return &v } -// ByteMap initializes m when it points to nil. -func ByteMap(m *map[string][]byte) { - if m != nil && *m == nil { - *m = make(map[string][]byte) - } -} - // FromPointer returns the value that p points to. // When p is nil, it returns the zero value of T. func FromPointer[T any](p *T) T { @@ -41,15 +23,17 @@ func Int32(v int32) *int32 { return &v } // Int64 returns a pointer to v. func Int64(v int64) *int64 { return &v } +// Map initializes m when it points to nil. +func Map[M ~map[K]V, K comparable, V any](m *M) { + // See https://pkg.go.dev/maps for similar type constraints. + + if m != nil && *m == nil { + *m = make(M) + } +} + // Pointer returns a pointer to v. func Pointer[T any](v T) *T { return &v } // String returns a pointer to v. func String(v string) *string { return &v } - -// StringMap initializes m when it points to nil. -func StringMap(m *map[string]string) { - if m != nil && *m == nil { - *m = make(map[string]string) - } -} diff --git a/internal/initialize/primitives_test.go b/internal/initialize/primitives_test.go index 45829374e7..e39898b4fe 100644 --- a/internal/initialize/primitives_test.go +++ b/internal/initialize/primitives_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize_test @@ -35,27 +24,6 @@ func TestBool(t *testing.T) { } } -func TestByteMap(t *testing.T) { - // Ignores nil pointer. - initialize.ByteMap(nil) - - var m map[string][]byte - - // Starts nil. - assert.Assert(t, m == nil) - - // Gets initialized. - initialize.ByteMap(&m) - assert.DeepEqual(t, m, map[string][]byte{}) - - // Now writable. - m["x"] = []byte("y") - - // Doesn't overwrite. - initialize.ByteMap(&m) - assert.DeepEqual(t, m, map[string][]byte{"x": []byte("y")}) -} - func TestFromPointer(t *testing.T) { t.Run("bool", func(t *testing.T) { assert.Equal(t, initialize.FromPointer((*bool)(nil)), false) @@ -118,6 +86,50 @@ func TestInt64(t *testing.T) { } } +func TestMap(t *testing.T) { + t.Run("map[string][]byte", func(t *testing.T) { + // Ignores nil pointer. + initialize.Map((*map[string][]byte)(nil)) + + var m map[string][]byte + + // Starts nil. + assert.Assert(t, m == nil) + + // Gets initialized. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string][]byte{}) + + // Now writable. + m["x"] = []byte("y") + + // Doesn't overwrite. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string][]byte{"x": []byte("y")}) + }) + + t.Run("map[string]string", func(t *testing.T) { + // Ignores nil pointer. + initialize.Map((*map[string]string)(nil)) + + var m map[string]string + + // Starts nil. + assert.Assert(t, m == nil) + + // Gets initialized. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string]string{}) + + // Now writable. + m["x"] = "y" + + // Doesn't overwrite. + initialize.Map(&m) + assert.DeepEqual(t, m, map[string]string{"x": "y"}) + }) +} + func TestPointer(t *testing.T) { t.Run("bool", func(t *testing.T) { n := initialize.Pointer(false) @@ -189,24 +201,3 @@ func TestString(t *testing.T) { assert.Equal(t, *n, "sup") } } - -func TestStringMap(t *testing.T) { - // Ignores nil pointer. - initialize.StringMap(nil) - - var m map[string]string - - // Starts nil. - assert.Assert(t, m == nil) - - // Gets initialized. - initialize.StringMap(&m) - assert.DeepEqual(t, m, map[string]string{}) - - // Now writable. - m["x"] = "y" - - // Doesn't overwrite. - initialize.StringMap(&m) - assert.DeepEqual(t, m, map[string]string{"x": "y"}) -} diff --git a/internal/initialize/security.go b/internal/initialize/security.go index 49291db478..5dd52d7b1e 100644 --- a/internal/initialize/security.go +++ b/internal/initialize/security.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize diff --git a/internal/initialize/security_test.go b/internal/initialize/security_test.go index 86ff98f701..0a6409cf41 100644 --- a/internal/initialize/security_test.go +++ b/internal/initialize/security_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package initialize_test diff --git a/internal/kubeapi/patch.go b/internal/kubeapi/patch.go index 992040a8d3..973852c17a 100644 --- a/internal/kubeapi/patch.go +++ b/internal/kubeapi/patch.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package kubeapi diff --git a/internal/kubeapi/patch_test.go b/internal/kubeapi/patch_test.go index 5307531228..52f5787b8f 100644 --- a/internal/kubeapi/patch_test.go +++ b/internal/kubeapi/patch_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package kubeapi diff --git a/internal/logging/logr.go b/internal/logging/logr.go index 4eadfe84ef..c907997d40 100644 --- a/internal/logging/logr.go +++ b/internal/logging/logr.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging @@ -24,21 +13,24 @@ import ( var global = logr.Discard() -// Discard returns a logr.Logger that discards all messages logged to it. -func Discard() logr.Logger { return logr.Discard() } +// Logger is an interface to an abstract logging implementation. +type Logger = logr.Logger + +// Discard returns a Logger that discards all messages logged to it. +func Discard() Logger { return logr.Discard() } -// SetLogSink replaces the global logr.Logger with sink. Before this is called, -// the global logr.Logger is a no-op. +// SetLogSink replaces the global Logger with sink. Before this is called, +// the global Logger is a no-op. func SetLogSink(sink logr.LogSink) { global = logr.New(sink) } // NewContext returns a copy of ctx containing logger. Retrieve it using FromContext. -func NewContext(ctx context.Context, logger logr.Logger) context.Context { +func NewContext(ctx context.Context, logger Logger) context.Context { return logr.NewContext(ctx, logger) } -// FromContext returns the global logr.Logger or the one stored by a prior call +// FromContext returns the global Logger or the one stored by a prior call // to NewContext. -func FromContext(ctx context.Context) logr.Logger { +func FromContext(ctx context.Context) Logger { log, err := logr.FromContext(ctx) if err != nil { log = global diff --git a/internal/logging/logr_test.go b/internal/logging/logr_test.go index 2d9002650a..1cbc818ad9 100644 --- a/internal/logging/logr_test.go +++ b/internal/logging/logr_test.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging diff --git a/internal/logging/logrus.go b/internal/logging/logrus.go index 0f3d441d20..9683a104d1 100644 --- a/internal/logging/logrus.go +++ b/internal/logging/logrus.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging diff --git a/internal/logging/logrus_test.go b/internal/logging/logrus_test.go index ee5777e6a0..3e73193d1a 100644 --- a/internal/logging/logrus_test.go +++ b/internal/logging/logrus_test.go @@ -1,17 +1,6 @@ -/* -Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package logging diff --git a/internal/naming/annotations.go b/internal/naming/annotations.go index 821cc14cdf..2179a5f084 100644 --- a/internal/naming/annotations.go +++ b/internal/naming/annotations.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -32,19 +21,17 @@ const ( // ID associated with a specific manual backup Job. PGBackRestBackup = annotationPrefix + "pgbackrest-backup" + // PGBackRestBackupJobCompletion is the annotation that is added to restore jobs, pvcs, and + // VolumeSnapshots that are involved in the volume snapshot creation process. The annotation + // holds a RFC3339 formatted timestamp that corresponds to the completion time of the associated + // backup job. + PGBackRestBackupJobCompletion = annotationPrefix + "pgbackrest-backup-job-completion" + // PGBackRestConfigHash is an annotation used to specify the hash value associated with a // repo configuration as needed to detect configuration changes that invalidate running Jobs // (and therefore must be recreated) PGBackRestConfigHash = annotationPrefix + "pgbackrest-hash" - // PGBackRestCurrentConfig is an annotation used to indicate the name of the pgBackRest - // configuration associated with a specific Job as determined by either the current primary - // (if no dedicated repository host is enabled), or the dedicated repository host. This helps - // in detecting pgBackRest backup Jobs that no longer mount the proper pgBackRest - // configuration, e.g. because a failover has occurred, or because dedicated repo host has been - // enabled or disabled. - PGBackRestCurrentConfig = annotationPrefix + "pgbackrest-config" - // PGBackRestRestore is the annotation that is added to a PostgresCluster to initiate an in-place // restore. The value of the annotation will be a unique identifier for a restore Job (e.g. 
a // timestamp), which will be stored in the PostgresCluster status to properly track completion @@ -70,4 +57,15 @@ const ( // bridge cluster, the user must add this annotation to the CR to allow the CR to take control of // the Bridge Cluster. The Value assigned to the annotation must be the ID of existing cluster. CrunchyBridgeClusterAdoptionAnnotation = annotationPrefix + "adopt-bridge-cluster" + + // AutoCreateUserSchemaAnnotation is an annotation used to allow users to control whether the cluster + // has schemas automatically created for the users defined in `spec.users` for all of the databases + // listed for that user. + AutoCreateUserSchemaAnnotation = annotationPrefix + "autoCreateUserSchema" + + // AuthorizeBackupRemovalAnnotation is an annotation used to allow users + // to delete PVC-based backups when changing from a cluster with backups + // to a cluster without backups. As usual with the operator, we do not + // touch cloud-based backups. + AuthorizeBackupRemovalAnnotation = annotationPrefix + "authorizeBackupRemoval" ) diff --git a/internal/naming/annotations_test.go b/internal/naming/annotations_test.go index d6f276ea5c..318dd5ab5c 100644 --- a/internal/naming/annotations_test.go +++ b/internal/naming/annotations_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -23,13 +12,15 @@ import ( ) func TestAnnotationsValid(t *testing.T) { + assert.Assert(t, nil == validation.IsQualifiedName(AuthorizeBackupRemovalAnnotation)) + assert.Assert(t, nil == validation.IsQualifiedName(AutoCreateUserSchemaAnnotation)) + assert.Assert(t, nil == validation.IsQualifiedName(CrunchyBridgeClusterAdoptionAnnotation)) assert.Assert(t, nil == validation.IsQualifiedName(Finalizer)) assert.Assert(t, nil == validation.IsQualifiedName(PatroniSwitchover)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackup)) + assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackupJobCompletion)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestConfigHash)) - assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestCurrentConfig)) - assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestRestore)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestIPVersion)) + assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestRestore)) assert.Assert(t, nil == validation.IsQualifiedName(PostgresExporterCollectorsAnnotation)) - assert.Assert(t, nil == validation.IsQualifiedName(CrunchyBridgeClusterAdoptionAnnotation)) } diff --git a/internal/naming/controllers.go b/internal/naming/controllers.go index 35a1f8dd48..3d492e8a3a 100644 --- a/internal/naming/controllers.go +++ b/internal/naming/controllers.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/naming/dns.go b/internal/naming/dns.go index b013cd69c7..d3351a5d70 100644 --- a/internal/naming/dns.go +++ b/internal/naming/dns.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/naming/dns_test.go b/internal/naming/dns_test.go index 70c38f71ca..e7e2ea9dc6 100644 --- a/internal/naming/dns_test.go +++ b/internal/naming/dns_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/naming/doc.go b/internal/naming/doc.go index 336193e5b6..72cab8b0b0 100644 --- a/internal/naming/doc.go +++ b/internal/naming/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package naming provides functions and constants for the postgres-operator // naming and labeling scheme. 
diff --git a/internal/naming/labels.go b/internal/naming/labels.go index 6c4d02b2d9..f25993122b 100644 --- a/internal/naming/labels.go +++ b/internal/naming/labels.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -119,6 +108,9 @@ const ( // RoleMonitoring is the LabelRole applied to Monitoring resources RoleMonitoring = "monitoring" + + // RoleSnapshot is the LabelRole applied to Snapshot resources. + RoleSnapshot = "snapshot" ) const ( @@ -151,6 +143,9 @@ const ( // BackupReplicaCreate is the backup type for the backup taken to enable pgBackRest replica // creation BackupReplicaCreate BackupJobType = "replica-create" + + // BackupScheduled is the backup type utilized for scheduled backups + BackupScheduled BackupJobType = "scheduled" ) const ( @@ -270,6 +265,7 @@ func PGBackRestCronJobLabels(clusterName, repoName, backupType string) labels.Se cronJobLabels := map[string]string{ LabelPGBackRestRepo: repoName, LabelPGBackRestCronJob: backupType, + LabelPGBackRestBackup: string(BackupScheduled), } return labels.Merge(commonLabels, cronJobLabels) } diff --git a/internal/naming/labels_test.go b/internal/naming/labels_test.go index ebd82fc11e..b8a7779858 100644 --- a/internal/naming/labels_test.go +++ b/internal/naming/labels_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -62,7 +51,9 @@ func TestLabelValuesValid(t *testing.T) { assert.Assert(t, nil == validation.IsValidLabelValue(RolePostgresWAL)) assert.Assert(t, nil == validation.IsValidLabelValue(RolePrimary)) assert.Assert(t, nil == validation.IsValidLabelValue(RoleReplica)) + assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupManual))) assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupReplicaCreate))) + assert.Assert(t, nil == validation.IsValidLabelValue(string(BackupScheduled))) assert.Assert(t, nil == validation.IsValidLabelValue(RoleMonitoring)) assert.Assert(t, nil == validation.IsValidLabelValue(RoleCrunchyBridgeClusterPostgresRole)) } @@ -193,6 +184,7 @@ func TestPGBackRestLabelFuncs(t *testing.T) { assert.Equal(t, pgBackRestCronJobLabels.Get(LabelCluster), clusterName) assert.Check(t, pgBackRestCronJobLabels.Has(LabelPGBackRest)) assert.Equal(t, pgBackRestCronJobLabels.Get(LabelPGBackRestRepo), repoName) + assert.Equal(t, pgBackRestCronJobLabels.Get(LabelPGBackRestBackup), string(BackupScheduled)) // verify the labels that identify pgBackRest dedicated repository host resources pgBackRestDedicatedLabels := PGBackRestDedicatedLabels(clusterName) diff --git a/internal/naming/limitations.md b/internal/naming/limitations.md index 78d3721088..ba607215f7 100644 --- a/internal/naming/limitations.md +++ b/internal/naming/limitations.md @@ -1,16 +1,7 @@ # Definitions diff --git a/internal/naming/names.go b/internal/naming/names.go index 64c8cba23b..369591de91 100644 --- a/internal/naming/names.go +++ b/internal/naming/names.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -260,6 +249,24 @@ func ClusterReplicaService(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { } } +// ClusterDedicatedSnapshotVolume returns the ObjectMeta for the dedicated Snapshot +// volume for a cluster. +func ClusterDedicatedSnapshotVolume(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.GetNamespace(), + Name: cluster.GetName() + "-snapshot", + } +} + +// ClusterVolumeSnapshot returns the ObjectMeta, including a random name, for a +// new pgdata VolumeSnapshot. +func ClusterVolumeSnapshot(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: cluster.Name + "-pgdata-snapshot-" + rand.String(4), + } +} + // GenerateInstance returns a random name for a member of cluster and set. func GenerateInstance( cluster *v1beta1.PostgresCluster, set *v1beta1.PostgresInstanceSetSpec, diff --git a/internal/naming/names_test.go b/internal/naming/names_test.go index b8663be022..27835c3e5d 100644 --- a/internal/naming/names_test.go +++ b/internal/naming/names_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -76,8 +65,8 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { value metav1.ObjectMeta } - testUniqueAndValid := func(t *testing.T, tests []test) sets.String { - names := sets.NewString() + testUniqueAndValid := func(t *testing.T, tests []test) sets.Set[string] { + names := sets.Set[string]{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert.Equal(t, tt.value.Namespace, cluster.Namespace) @@ -170,7 +159,7 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { assert.Assert(t, nil == validation.IsDNS1123Label(value.Name)) prefix := PostgresUserSecret(cluster, "").Name - for _, name := range names.List() { + for _, name := range sets.List(names) { assert.Assert(t, !strings.HasPrefix(name, prefix), "%q may collide", name) } }) @@ -209,6 +198,12 @@ func TestClusterNamesUniqueAndValid(t *testing.T) { {"PGBackRestRepoVolume", PGBackRestRepoVolume(cluster, repoName)}, }) }) + + t.Run("VolumeSnapshots", func(t *testing.T) { + testUniqueAndValid(t, []test{ + {"ClusterVolumeSnapshot", ClusterVolumeSnapshot(cluster)}, + }) + }) } func TestInstanceNamesUniqueAndValid(t *testing.T) { diff --git a/internal/naming/selectors.go b/internal/naming/selectors.go index 0fe9e7bbe7..94dbc3a9fa 100644 --- a/internal/naming/selectors.go +++ b/internal/naming/selectors.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -46,6 +35,30 @@ func Cluster(cluster string) metav1.LabelSelector { } } +// ClusterRestoreJobs selects all existing restore jobs in a cluster. +func ClusterRestoreJobs(cluster string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: LabelPGBackRestRestore, Operator: metav1.LabelSelectorOpExists}, + }, + } +} + +// ClusterBackupJobs selects things for all existing backup jobs in cluster. 
+func ClusterBackupJobs(cluster string) metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + LabelCluster: cluster, + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: LabelPGBackRestBackup, Operator: metav1.LabelSelectorOpExists}, + }, + } +} + // ClusterDataForPostgresAndPGBackRest selects things for PostgreSQL data and // things for pgBackRest data. func ClusterDataForPostgresAndPGBackRest(cluster string) metav1.LabelSelector { @@ -139,13 +152,6 @@ func ClusterPostgresUsers(cluster string) metav1.LabelSelector { } } -// ClusterPrimary selects things for the Primary PostgreSQL instance. -func ClusterPrimary(cluster string) metav1.LabelSelector { - s := ClusterInstances(cluster) - s.MatchLabels[LabelRole] = RolePatroniLeader - return s -} - // CrunchyBridgeClusterPostgresRoles selects things labeled for CrunchyBridgeCluster // PostgreSQL roles in cluster. func CrunchyBridgeClusterPostgresRoles(clusterName string) metav1.LabelSelector { diff --git a/internal/naming/selectors_test.go b/internal/naming/selectors_test.go index 7b9ff2cddb..1f5f42ad96 100644 --- a/internal/naming/selectors_test.go +++ b/internal/naming/selectors_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming @@ -43,6 +32,18 @@ func TestCluster(t *testing.T) { assert.ErrorContains(t, err, "Invalid") } +func TestClusterBackupJobs(t *testing.T) { + s, err := AsSelector(ClusterBackupJobs("something")) + assert.NilError(t, err) + assert.DeepEqual(t, s.String(), strings.Join([]string{ + "postgres-operator.crunchydata.com/cluster=something", + "postgres-operator.crunchydata.com/pgbackrest-backup", + }, ",")) + + _, err = AsSelector(Cluster("--whoa/yikes")) + assert.ErrorContains(t, err, "Invalid") +} + func TestClusterDataForPostgresAndPGBackRest(t *testing.T) { s, err := AsSelector(ClusterDataForPostgresAndPGBackRest("something")) assert.NilError(t, err) @@ -147,16 +148,6 @@ func TestClusterPostgresUsers(t *testing.T) { assert.ErrorContains(t, err, "Invalid") } -func TestClusterPrimary(t *testing.T) { - s, err := AsSelector(ClusterPrimary("something")) - assert.NilError(t, err) - assert.DeepEqual(t, s.String(), strings.Join([]string{ - "postgres-operator.crunchydata.com/cluster=something", - "postgres-operator.crunchydata.com/instance", - "postgres-operator.crunchydata.com/role=master", - }, ",")) -} - func TestCrunchyBridgeClusterPostgresRoles(t *testing.T) { s, err := AsSelector(CrunchyBridgeClusterPostgresRoles("something")) assert.NilError(t, err) diff --git a/internal/naming/telemetry.go b/internal/naming/telemetry.go index d786287fff..5825d6299f 100644 --- a/internal/naming/telemetry.go +++ b/internal/naming/telemetry.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package naming diff --git a/internal/patroni/api.go b/internal/patroni/api.go index b3824904a2..679da5f4af 100644 --- a/internal/patroni/api.go +++ b/internal/patroni/api.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/api_test.go b/internal/patroni/api_test.go index 2df86ce1aa..1603d2fc75 100644 --- a/internal/patroni/api_test.go +++ b/internal/patroni/api_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/certificates.go b/internal/patroni/certificates.go index f7e80c33e1..9aa1525769 100644 --- a/internal/patroni/certificates.go +++ b/internal/patroni/certificates.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/certificates.md b/internal/patroni/certificates.md index 633466d31c..f58786ce20 100644 --- a/internal/patroni/certificates.md +++ b/internal/patroni/certificates.md @@ -1,16 +1,7 @@ Server diff --git a/internal/patroni/certificates_test.go b/internal/patroni/certificates_test.go index bf47b95b46..3073f2247f 100644 --- a/internal/patroni/certificates_test.go +++ b/internal/patroni/certificates_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/config.go b/internal/patroni/config.go index 8fcd845b78..b4d7e54f68 100644 --- a/internal/patroni/config.go +++ b/internal/patroni/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/config.md b/internal/patroni/config.md index 4c261c40ab..18d28d8a4e 100644 --- a/internal/patroni/config.md +++ b/internal/patroni/config.md @@ -1,16 +1,7 @@ Patroni configuration is complicated. The daemon `patroni` and the client diff --git a/internal/patroni/config_test.go b/internal/patroni/config_test.go index 230d2dd6a4..a45568df8b 100644 --- a/internal/patroni/config_test.go +++ b/internal/patroni/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -715,24 +704,6 @@ func TestDynamicConfiguration(t *testing.T) { }, }, }, - { - name: "pg version 10", - cluster: &v1beta1.PostgresCluster{ - Spec: v1beta1.PostgresClusterSpec{ - PostgresVersion: 10, - }, - }, - expected: map[string]any{ - "loop_wait": int32(10), - "ttl": int32(30), - "postgresql": map[string]any{ - "parameters": map[string]any{}, - "pg_hba": []string{}, - "use_pg_rewind": false, - "use_slots": false, - }, - }, - }, { name: "tde enabled", cluster: &v1beta1.PostgresCluster{ diff --git a/internal/patroni/doc.go b/internal/patroni/doc.go index 8962a0af23..500305406d 100644 --- a/internal/patroni/doc.go +++ b/internal/patroni/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package patroni provides clients, utilities and resources for configuring and // interacting with Patroni inside of a PostgreSQL cluster diff --git a/internal/patroni/rbac.go b/internal/patroni/rbac.go index a476f3b08d..dcf3f18cea 100644 --- a/internal/patroni/rbac.go +++ b/internal/patroni/rbac.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -23,25 +12,25 @@ import ( ) // "list", "patch", and "watch" are required. Include "get" for good measure. -// +kubebuilder:rbac:namespace=patroni,groups="",resources="pods",verbs={get} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="pods",verbs={list,watch} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="pods",verbs={patch} +// +kubebuilder:rbac:groups="",resources="pods",verbs={get} +// +kubebuilder:rbac:groups="",resources="pods",verbs={list,watch} +// +kubebuilder:rbac:groups="",resources="pods",verbs={patch} // TODO(cbandy): Separate these so that one can choose ConfigMap over Endpoints. // When using Endpoints for DCS, "create", "list", "patch", and "watch" are // required. Include "get" for good measure. The `patronictl scaffold` and // `patronictl remove` commands require "deletecollection". 
-// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints",verbs={get} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints",verbs={create,deletecollection} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints",verbs={list,watch} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints",verbs={patch} -// +kubebuilder:rbac:namespace=patroni,groups="",resources="services",verbs={create} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={get} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={create,deletecollection} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={list,watch} +// +kubebuilder:rbac:groups="",resources="endpoints",verbs={patch} +// +kubebuilder:rbac:groups="",resources="services",verbs={create} // The OpenShift RestrictedEndpointsAdmission plugin requires special // authorization to create Endpoints that contain Pod IPs. // - https://github.com/openshift/origin/pull/9383 -// +kubebuilder:rbac:namespace=patroni,groups="",resources="endpoints/restricted",verbs={create} +// +kubebuilder:rbac:groups="",resources="endpoints/restricted",verbs={create} // Permissions returns the RBAC rules Patroni needs for cluster. func Permissions(cluster *v1beta1.PostgresCluster) []rbacv1.PolicyRule { diff --git a/internal/patroni/rbac_test.go b/internal/patroni/rbac_test.go index e62c34709c..39a8dff245 100644 --- a/internal/patroni/rbac_test.go +++ b/internal/patroni/rbac_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/patroni/reconcile.go b/internal/patroni/reconcile.go index 06f5d6f1e6..4fbb08b67d 100644 --- a/internal/patroni/reconcile.go +++ b/internal/patroni/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package patroni @@ -46,7 +35,7 @@ func ClusterConfigMap(ctx context.Context, ) error { var err error - initialize.StringMap(&outClusterConfigMap.Data) + initialize.Map(&outClusterConfigMap.Data) outClusterConfigMap.Data[configMapFileKey], err = clusterYAML(inCluster, inHBAs, inParameters) @@ -62,7 +51,7 @@ func InstanceConfigMap(ctx context.Context, ) error { var err error - initialize.StringMap(&outInstanceConfigMap.Data) + initialize.Map(&outInstanceConfigMap.Data) command := pgbackrest.ReplicaCreateCommand(inCluster, inInstanceSpec) @@ -77,7 +66,7 @@ func InstanceCertificates(ctx context.Context, inRoot pki.Certificate, inDNS pki.Certificate, inDNSKey pki.PrivateKey, outInstanceCertificates *corev1.Secret, ) error { - initialize.ByteMap(&outInstanceCertificates.Data) + initialize.Map(&outInstanceCertificates.Data) var err error outInstanceCertificates.Data[certAuthorityFileKey], err = certFile(inRoot) diff --git a/internal/patroni/reconcile_test.go b/internal/patroni/reconcile_test.go index 89b3920334..5d2a2c0ad5 100644 --- a/internal/patroni/reconcile_test.go +++ b/internal/patroni/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package patroni diff --git a/internal/pgadmin/config.go b/internal/pgadmin/config.go index 4552b77d29..553a90f656 100644 --- a/internal/pgadmin/config.go +++ b/internal/pgadmin/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin diff --git a/internal/pgadmin/config_test.go b/internal/pgadmin/config_test.go index cdb3e1b569..87cd7847c2 100644 --- a/internal/pgadmin/config_test.go +++ b/internal/pgadmin/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin diff --git a/internal/pgadmin/reconcile.go b/internal/pgadmin/reconcile.go index 3e56989fb5..af62c482f2 100644 --- a/internal/pgadmin/reconcile.go +++ b/internal/pgadmin/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin @@ -43,8 +32,6 @@ RED="\033[0;31m" GREEN="\033[0;32m" RESET="\033[0m" -CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'} - function enable_debugging() { if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] then @@ -130,8 +117,6 @@ then err_check "$?" "pgAdmin4 Database Setup" "Could not create pgAdmin4 database: \n$(cat /tmp/pgadmin4.stderr)" fi -cd ${PGADMIN_DIR?} - echo_info "Starting Apache web server.." /usr/sbin/httpd -D FOREGROUND & echo $! > $APACHE_PIDFILE @@ -148,7 +133,7 @@ func ConfigMap( return nil } - initialize.StringMap(&outConfigMap.Data) + initialize.Map(&outConfigMap.Data) // To avoid spurious reconciles, the following value must not change when // the spec does not change. [json.Encoder] and [json.Marshal] do this by diff --git a/internal/pgadmin/reconcile_test.go b/internal/pgadmin/reconcile_test.go index 7448552029..f91a9b807f 100644 --- a/internal/pgadmin/reconcile_test.go +++ b/internal/pgadmin/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin @@ -117,8 +106,6 @@ containers: GREEN="\033[0;32m" RESET="\033[0m" - CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'} - function enable_debugging() { if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] then @@ -204,8 +191,6 @@ containers: err_check "$?" "pgAdmin4 Database Setup" "Could not create pgAdmin4 database: \n$(cat /tmp/pgadmin4.stderr)" fi - cd ${PGADMIN_DIR?} - echo_info "Starting Apache web server.." /usr/sbin/httpd -D FOREGROUND & echo $! > $APACHE_PIDFILE @@ -355,8 +340,6 @@ containers: GREEN="\033[0;32m" RESET="\033[0m" - CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'} - function enable_debugging() { if [[ ${CRUNCHY_DEBUG:-false} == "true" ]] then @@ -442,8 +425,6 @@ containers: err_check "$?" 
"pgAdmin4 Database Setup" "Could not create pgAdmin4 database: \n$(cat /tmp/pgadmin4.stderr)" fi - cd ${PGADMIN_DIR?} - echo_info "Starting Apache web server.." /usr/sbin/httpd -D FOREGROUND & echo $! > $APACHE_PIDFILE diff --git a/internal/pgadmin/users.go b/internal/pgadmin/users.go index 9c66cb36f2..7ce69ce211 100644 --- a/internal/pgadmin/users.go +++ b/internal/pgadmin/users.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin diff --git a/internal/pgadmin/users_test.go b/internal/pgadmin/users_test.go index 0bfa73d55d..69619667af 100644 --- a/internal/pgadmin/users_test.go +++ b/internal/pgadmin/users_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgadmin diff --git a/internal/pgaudit/postgres.go b/internal/pgaudit/postgres.go index 0941b40434..07867d020e 100644 --- a/internal/pgaudit/postgres.go +++ b/internal/pgaudit/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgaudit diff --git a/internal/pgaudit/postgres_test.go b/internal/pgaudit/postgres_test.go index 170a3b691e..3734e511f0 100644 --- a/internal/pgaudit/postgres_test.go +++ b/internal/pgaudit/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgaudit diff --git a/internal/pgbackrest/certificates.go b/internal/pgbackrest/certificates.go index e9bf93cf73..bb2633dfe7 100644 --- a/internal/pgbackrest/certificates.go +++ b/internal/pgbackrest/certificates.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/certificates.md b/internal/pgbackrest/certificates.md index ef6a1dd7b0..344616486b 100644 --- a/internal/pgbackrest/certificates.md +++ b/internal/pgbackrest/certificates.md @@ -1,16 +1,7 @@ Server diff --git a/internal/pgbackrest/certificates_test.go b/internal/pgbackrest/certificates_test.go index 0903deef4d..4ef41b2879 100644 --- a/internal/pgbackrest/certificates_test.go +++ b/internal/pgbackrest/certificates_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index 03cfb49d9f..f50b2690ee 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -99,9 +88,8 @@ func CreatePGBackRestConfigMapIntent(postgresCluster *v1beta1.PostgresCluster, } // create an empty map for the config data - initialize.StringMap(&cm.Data) + initialize.Map(&cm.Data) - addDedicatedHost := DedicatedRepoHostEnabled(postgresCluster) pgdataDir := postgres.DataDirectory(postgresCluster) // Port will always be populated, since the API will set a default of 5432 if not provided pgPort := *postgresCluster.Spec.Port @@ -114,13 +102,14 @@ func CreatePGBackRestConfigMapIntent(postgresCluster *v1beta1.PostgresCluster, postgresCluster.Spec.Backups.PGBackRest.Global, ).String() - // As the cluster transitions from having a repository host to having none, // PostgreSQL instances that have not rolled out expect to mount a server // config file. Always populate that file so those volumes stay valid and - // Kubernetes propagates their contents to those pods. + // Kubernetes propagates their contents to those pods. The repo host name + // given below should always be set, but this guards for cases when it might + // not be. cm.Data[serverConfigMapKey] = "" - if addDedicatedHost && repoHostName != "" { + if repoHostName != "" { cm.Data[serverConfigMapKey] = iniGeneratedWarning + serverConfig(postgresCluster).String() @@ -232,8 +221,8 @@ bash -xc "pgbackrest restore ${opts}" rm -f "${pgdata}/patroni.dynamic.json" export PGDATA="${pgdata}" PGHOST='/tmp' -until [ "${recovery=}" = 'f' ]; do -if [ -z "${recovery}" ]; then +until [[ "${recovery=}" == 'f' ]]; do +if [[ -z "${recovery}" ]]; then control=$(pg_controldata) read -r max_conn <<< "${control##*max_connections setting:}" read -r max_lock <<< "${control##*max_locks_per_xact setting:}" @@ -253,7 +242,7 @@ unix_socket_directories = '/tmp'` + ekc + ` huge_pages = ` + hugePagesSetting + ` EOF -if [ "$(< "${pgdata}/PG_VERSION")" -ge 12 ]; then +if [[ "$(< "${pgdata}/PG_VERSION")" -ge 12 ]]; then read -r max_wals <<< "${control##*max_wal_senders setting:}" echo >> /tmp/postgres.restore.conf "max_wal_senders = '${max_wals}'" fi @@ -265,7 +254,7 @@ recovery=$(psql -Atc "SELECT CASE WHEN NOT pg_catalog.pg_is_in_recovery() THEN false WHEN NOT pg_catalog.pg_is_wal_replay_paused() THEN true ELSE pg_catalog.pg_wal_replay_resume()::text = '' -END recovery" && sleep 1) || true +END recovery" && sleep 1) ||: done pg_ctl stop --silent --wait --timeout=31536000 @@ -274,6 +263,42 @@ mv "${pgdata}" "${pgdata}_bootstrap"` return append([]string{"bash", "-ceu", "--", restoreScript, "-", pgdata}, args...) } +// DedicatedSnapshotVolumeRestoreCommand returns the command for performing a pgBackRest delta restore +// into a dedicated snapshot volume. In addition to calling the pgBackRest restore command with any +// pgBackRest options provided, the script also removes the patroni.dynamic.json file if present. This +// ensures the configuration from the cluster being restored from is not utilized when bootstrapping a +// new cluster, and the configuration for the new cluster is utilized instead. +func DedicatedSnapshotVolumeRestoreCommand(pgdata string, args ...string) []string { + + // The postmaster.pid file is removed, if it exists, before attempting a restore. + // This allows the restore to be tried more than once without the causing an + // error due to the presence of the file in subsequent attempts. + + // Wrap pgbackrest restore command in backup_label checks. 
If pre/post + // backup_labels are different, restore moved database forward, so return 0 + // so that the Job is successful and we know to proceed with snapshot. + // Otherwise return 1, Job will fail, and we will not proceed with snapshot. + restoreScript := `declare -r pgdata="$1" opts="$2" +BACKUP_LABEL=$([[ ! -e "${pgdata}/backup_label" ]] || md5sum "${pgdata}/backup_label") +echo "Starting pgBackRest delta restore" + +install --directory --mode=0700 "${pgdata}" +rm -f "${pgdata}/postmaster.pid" +bash -xc "pgbackrest restore ${opts}" +rm -f "${pgdata}/patroni.dynamic.json" + +BACKUP_LABEL_POST=$([[ ! -e "${pgdata}/backup_label" ]] || md5sum "${pgdata}/backup_label") +if [[ "${BACKUP_LABEL}" != "${BACKUP_LABEL_POST}" ]] +then + exit 0 +fi +echo Database was not advanced by restore. No snapshot will be taken. +echo Check that your last backup was successful. +exit 1` + + return append([]string{"bash", "-ceu", "--", restoreScript, "-", pgdata}, args...) +} + // populatePGInstanceConfigurationMap returns options representing the pgBackRest configuration for // a PostgreSQL instance func populatePGInstanceConfigurationMap( @@ -291,6 +316,10 @@ func populatePGInstanceConfigurationMap( global := iniMultiSet{} stanza := iniMultiSet{} + // For faster and more robust WAL archiving, we turn on pgBackRest archive-async. + global.Set("archive-async", "y") + // pgBackRest spool-path should always be co-located with the Postgres WAL path. + global.Set("spool-path", "/pgdata/pgbackrest-spool") // pgBackRest will log to the pgData volume for commands run on the PostgreSQL instance global.Set("log-path", naming.PGBackRestPGDataLogPath) @@ -368,13 +397,18 @@ func populateRepoHostConfigurationMap( if !pgBackRestLogPathSet && repo.Volume != nil { // pgBackRest will log to the first configured repo volume when commands // are run on the pgBackRest repo host. With our previous check in - // DedicatedRepoHostEnabled(), we've already validated that at least one + // RepoHostVolumeDefined(), we've already validated that at least one // defined repo has a volume. global.Set("log-path", fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name)) pgBackRestLogPathSet = true } } + // If no log path was set, don't log because the default path is not writable. + if !pgBackRestLogPathSet { + global.Set("log-level-file", "off") + } + for option, val := range globalConfig { global.Set(option, val) } @@ -451,21 +485,21 @@ func reloadCommand(name string) []string { // mtimes. 
// - https://unix.stackexchange.com/a/407383 const script = ` -exec {fd}<> <(:) +exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done diff --git a/internal/pgbackrest/config.md b/internal/pgbackrest/config.md index 13ed59b64b..2101535b3a 100644 --- a/internal/pgbackrest/config.md +++ b/internal/pgbackrest/config.md @@ -1,16 +1,7 @@ # pgBackRest Configuration Overview @@ -31,6 +22,8 @@ As shown, the settings with the `cfgSectionGlobal` designation are `log-path`: The log path provides a location for pgBackRest to store log files. +`log-level-file`: Level for file logging. Set to 'off' when the repo host has no volume. + `repo-path`: Path where backups and archive are stored. The repository is where pgBackRest stores backups and archives WAL segments. @@ -75,6 +68,7 @@ pg1-socket-path [global] log-path repo1-path +log-level-file [stanza] pg1-host diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index c6f7f9ed02..b74bf9a4a8 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -131,6 +120,7 @@ pg1-socket-path = /tmp/postgres # Your changes will not be saved. 
[global] +archive-async = y log-path = /pgdata/pgbackrest/log repo1-host = repo-hostname-0.pod-service-name.test-ns.svc.`+domain+` repo1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt @@ -151,6 +141,7 @@ repo4-s3-bucket = s-bucket repo4-s3-endpoint = endpoint-s repo4-s3-region = earth repo4-type = s3 +spool-path = /pgdata/pgbackrest-spool [db] pg1-path = /pgdata/pg12 @@ -374,6 +365,36 @@ func TestRestoreCommandTDE(t *testing.T) { assert.Assert(t, strings.Contains(string(b), "encryption_key_command = 'echo testValue'"), "expected encryption_key_command setting, got:\n%s", b) } + +func TestDedicatedSnapshotVolumeRestoreCommand(t *testing.T) { + shellcheck := require.ShellCheck(t) + + pgdata := "/pgdata/pg13" + opts := []string{ + "--stanza=" + DefaultStanzaName, "--pg1-path=" + pgdata, + "--repo=1"} + command := DedicatedSnapshotVolumeRestoreCommand(pgdata, strings.Join(opts, " ")) + + assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) + assert.Assert(t, len(command) > 3) + + dir := t.TempDir() + file := filepath.Join(dir, "script.bash") + assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) + + cmd := exec.Command(shellcheck, "--enable=all", file) + output, err := cmd.CombinedOutput() + assert.NilError(t, err, "%q\n%s", cmd.Args, output) +} + +func TestDedicatedSnapshotVolumeRestoreCommandPrettyYAML(t *testing.T) { + b, err := yaml.Marshal(DedicatedSnapshotVolumeRestoreCommand("/dir", "--options")) + + assert.NilError(t, err) + assert.Assert(t, strings.Contains(string(b), "\n- |"), + "expected literal block scalar, got:\n%s", b) +} + func TestServerConfig(t *testing.T) { cluster := &v1beta1.PostgresCluster{} cluster.UID = "shoe" diff --git a/internal/pgbackrest/helpers_test.go b/internal/pgbackrest/helpers_test.go deleted file mode 100644 index 265517c8af..0000000000 --- a/internal/pgbackrest/helpers_test.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package pgbackrest - -import ( - "github.com/crunchydata/postgres-operator/internal/testing/cmp" -) - -// marshalMatches converts actual to YAML and compares that to expected. -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - return cmp.MarshalMatches(actual, expected) -} diff --git a/internal/pgbackrest/iana.go b/internal/pgbackrest/iana.go index 9d36385ed6..c6e2f71e6c 100644 --- a/internal/pgbackrest/iana.go +++ b/internal/pgbackrest/iana.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/options.go b/internal/pgbackrest/options.go index 54cb7dac37..2439901e47 100644 --- a/internal/pgbackrest/options.go +++ b/internal/pgbackrest/options.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/options_test.go b/internal/pgbackrest/options_test.go index f31853781c..374737ec7f 100644 --- a/internal/pgbackrest/options_test.go +++ b/internal/pgbackrest/options_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/pgbackrest.go b/internal/pgbackrest/pgbackrest.go index a62f098a17..21124b9744 100644 --- a/internal/pgbackrest/pgbackrest.go +++ b/internal/pgbackrest/pgbackrest.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -20,9 +9,10 @@ import ( "context" "fmt" "io" - "strings" "github.com/pkg/errors" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) const ( @@ -30,9 +20,9 @@ const ( // is detected while attempting stanza creation errMsgConfigHashMismatch = "postgres operator error: pgBackRest config hash mismatch" - // errMsgBackupDbMismatch is the error message returned from pgBackRest when PG versions - // or PG system identifiers do not match between the PG instance and the existing stanza - errMsgBackupDbMismatch = "backup and archive info files exist but do not match the database" + // errMsgStaleReposWithVolumesConfig is the error message displayed when a volume-backed repo has been + // configured, but the configuration has not yet propagated into the container. + errMsgStaleReposWithVolumesConfig = "postgres operator error: pgBackRest stale volume-backed repo configuration" ) // Executor calls "pgbackrest" commands @@ -51,31 +41,50 @@ type Executor func( // from running (with a config mismatch indicating that the pgBackRest configuration as stored in // the cluster's pgBackRest ConfigMap has not yet propagated to the Pod). func (exec Executor) StanzaCreateOrUpgrade(ctx context.Context, configHash string, - upgrade bool) (bool, error) { + postgresCluster *v1beta1.PostgresCluster) (bool, error) { var stdout, stderr bytes.Buffer - stanzaCmd := "create" - if upgrade { - stanzaCmd = "upgrade" + var reposWithVolumes []v1beta1.PGBackRestRepo + for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { + if repo.Volume != nil { + reposWithVolumes = append(reposWithVolumes, repo) + } + } + + grep := "grep %s-path /etc/pgbackrest/conf.d/pgbackrest_instance.conf" + + var checkRepoCmd string + if len(reposWithVolumes) > 0 { + repo := reposWithVolumes[0] + checkRepoCmd = checkRepoCmd + fmt.Sprintf(grep, repo.Name) + + reposWithVolumes = reposWithVolumes[1:] + for _, repo := range reposWithVolumes { + checkRepoCmd = checkRepoCmd + fmt.Sprintf(" && "+grep, repo.Name) + } } // this is the script that is run to create a stanza. First it checks the // "config-hash" file to ensure all configuration changes (e.g. from ConfigMaps) have // propagated to the container, and if not, it prints an error and returns with exit code 1). + // Next, it checks that any volume-backed repo added to the config has propagated into + // the container, and if not, prints an error and exits with code 1. // Otherwise, it runs the pgbackrest command, which will either be "stanza-create" or // "stanza-upgrade", depending on the value of the boolean "upgrade" parameter. const script = ` -declare -r hash="$1" stanza="$2" message="$3" cmd="$4" +declare -r hash="$1" stanza="$2" hash_msg="$3" vol_msg="$4" check_repo_cmd="$5" if [[ "$(< /etc/pgbackrest/conf.d/config-hash)" != "${hash}" ]]; then - printf >&2 "%s" "${message}"; exit 1; + printf >&2 "%s" "${hash_msg}"; exit 1; +elif ! 
bash -c "${check_repo_cmd}"; then + printf >&2 "%s" "${vol_msg}"; exit 1; else - pgbackrest "${cmd}" --stanza="${stanza}" + pgbackrest stanza-create --stanza="${stanza}" || pgbackrest stanza-upgrade --stanza="${stanza}" fi ` if err := exec(ctx, nil, &stdout, &stderr, "bash", "-ceu", "--", - script, "-", configHash, DefaultStanzaName, errMsgConfigHashMismatch, - fmt.Sprintf("stanza-%s", stanzaCmd)); err != nil { + script, "-", configHash, DefaultStanzaName, errMsgConfigHashMismatch, errMsgStaleReposWithVolumesConfig, + checkRepoCmd); err != nil { errReturn := stderr.String() @@ -86,10 +95,10 @@ fi return true, nil } - // if the err returned from pgbackrest command is about a version mismatch - // then we should run upgrade rather than create - if strings.Contains(errReturn, errMsgBackupDbMismatch) { - return exec.StanzaCreateOrUpgrade(ctx, configHash, true) + // if the configuration for volume-backed repositories is stale, return true and don't return an error since this + // is expected while waiting for config changes in ConfigMaps to make it to the container + if errReturn == errMsgStaleReposWithVolumesConfig { + return true, nil } // if none of the above errors, return the err diff --git a/internal/pgbackrest/pgbackrest_test.go b/internal/pgbackrest/pgbackrest_test.go index 0af8b2aab0..33c97913cf 100644 --- a/internal/pgbackrest/pgbackrest_test.go +++ b/internal/pgbackrest/pgbackrest_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -24,8 +13,13 @@ import ( "testing" "gotest.tools/v3/assert" + "k8s.io/apimachinery/pkg/api/resource" + + corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/testing/require" + + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) func TestStanzaCreateOrUpgrade(t *testing.T) { @@ -34,15 +28,19 @@ func TestStanzaCreateOrUpgrade(t *testing.T) { ctx := context.Background() configHash := "7f5d4d5bdc" expectedCommand := []string{"bash", "-ceu", "--", ` -declare -r hash="$1" stanza="$2" message="$3" cmd="$4" +declare -r hash="$1" stanza="$2" hash_msg="$3" vol_msg="$4" check_repo_cmd="$5" if [[ "$(< /etc/pgbackrest/conf.d/config-hash)" != "${hash}" ]]; then - printf >&2 "%s" "${message}"; exit 1; + printf >&2 "%s" "${hash_msg}"; exit 1; +elif ! 
bash -c "${check_repo_cmd}"; then + printf >&2 "%s" "${vol_msg}"; exit 1; else - pgbackrest "${cmd}" --stanza="${stanza}" + pgbackrest stanza-create --stanza="${stanza}" || pgbackrest stanza-upgrade --stanza="${stanza}" fi `, "-", "7f5d4d5bdc", "db", "postgres operator error: pgBackRest config hash mismatch", - "stanza-create"} + "postgres operator error: pgBackRest stale volume-backed repo configuration", + "grep repo1-path /etc/pgbackrest/conf.d/pgbackrest_instance.conf", + } var shellCheckScript string stanzaExec := func(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, @@ -56,8 +54,36 @@ fi return nil } + postgresCluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Repos: []v1beta1.PGBackRestRepo{{ + Name: "repo1", + Volume: &v1beta1.RepoPVC{ + VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, { + Name: "repo2", + S3: &v1beta1.RepoS3{ + Bucket: "bucket", + Endpoint: "endpoint", + Region: "region", + }, + }}, + }, + }, + }, + } - configHashMismatch, err := Executor(stanzaExec).StanzaCreateOrUpgrade(ctx, configHash, false) + configHashMismatch, err := Executor(stanzaExec).StanzaCreateOrUpgrade(ctx, configHash, postgresCluster) assert.NilError(t, err) assert.Assert(t, !configHashMismatch) diff --git a/internal/pgbackrest/postgres.go b/internal/pgbackrest/postgres.go index 4636ee9db5..ab5c71868c 100644 --- a/internal/pgbackrest/postgres.go +++ b/internal/pgbackrest/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -26,6 +15,7 @@ import ( func PostgreSQL( inCluster *v1beta1.PostgresCluster, outParameters *postgres.Parameters, + backupsEnabled bool, ) { if outParameters.Mandatory == nil { outParameters.Mandatory = postgres.NewParameterSet() @@ -38,9 +28,15 @@ func PostgreSQL( // - https://pgbackrest.org/user-guide.html#quickstart/configure-archiving // - https://pgbackrest.org/command.html#command-archive-push // - https://www.postgresql.org/docs/current/runtime-config-wal.html - archive := `pgbackrest --stanza=` + DefaultStanzaName + ` archive-push "%p"` outParameters.Mandatory.Add("archive_mode", "on") - outParameters.Mandatory.Add("archive_command", archive) + if backupsEnabled { + archive := `pgbackrest --stanza=` + DefaultStanzaName + ` archive-push "%p"` + outParameters.Mandatory.Add("archive_command", archive) + } else { + // If backups are disabled, keep archive_mode on (to avoid a Postgres restart) + // and throw away WAL. 
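+		// The `true` command always exits zero, so PostgreSQL treats every WAL
+		// segment as successfully archived and recycles it rather than retaining it.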
+ outParameters.Mandatory.Add("archive_command", `true`) + } // archive_timeout is used to determine at what point a WAL file is switched, // if the WAL archive has not reached its full size in # of transactions diff --git a/internal/pgbackrest/postgres_test.go b/internal/pgbackrest/postgres_test.go index da41b86281..b87b35631a 100644 --- a/internal/pgbackrest/postgres_test.go +++ b/internal/pgbackrest/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -28,7 +17,7 @@ func TestPostgreSQLParameters(t *testing.T) { cluster := new(v1beta1.PostgresCluster) parameters := new(postgres.Parameters) - PostgreSQL(cluster, parameters) + PostgreSQL(cluster, parameters, true) assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ "archive_mode": "on", "archive_command": `pgbackrest --stanza=db archive-push "%p"`, @@ -39,12 +28,19 @@ func TestPostgreSQLParameters(t *testing.T) { "archive_timeout": "60s", }) + PostgreSQL(cluster, parameters, false) + assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ + "archive_mode": "on", + "archive_command": "true", + "restore_command": `pgbackrest --stanza=db archive-get %f "%p"`, + }) + cluster.Spec.Standby = &v1beta1.PostgresStandbySpec{ Enabled: true, RepoName: "repo99", } - PostgreSQL(cluster, parameters) + PostgreSQL(cluster, parameters, true) assert.DeepEqual(t, parameters.Mandatory.AsMap(), map[string]string{ "archive_mode": "on", "archive_command": `pgbackrest --stanza=db archive-push "%p"`, diff --git a/internal/pgbackrest/rbac.go b/internal/pgbackrest/rbac.go index 5fe4cc4b96..950f10ef8b 100644 --- a/internal/pgbackrest/rbac.go +++ b/internal/pgbackrest/rbac.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -22,8 +11,8 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// +kubebuilder:rbac:namespace=pgbackrest,groups="",resources="pods",verbs={list} -// +kubebuilder:rbac:namespace=pgbackrest,groups="",resources="pods/exec",verbs={create} +// +kubebuilder:rbac:groups="",resources="pods",verbs={list} +// +kubebuilder:rbac:groups="",resources="pods/exec",verbs={create} // Permissions returns the RBAC rules pgBackRest needs for a cluster. func Permissions(cluster *v1beta1.PostgresCluster) []rbacv1.PolicyRule { diff --git a/internal/pgbackrest/rbac_test.go b/internal/pgbackrest/rbac_test.go index 6b213df664..a620276f64 100644 --- a/internal/pgbackrest/rbac_test.go +++ b/internal/pgbackrest/rbac_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest diff --git a/internal/pgbackrest/reconcile.go b/internal/pgbackrest/reconcile.go index f7b6b029ea..d22bccc3c0 100644 --- a/internal/pgbackrest/reconcile.go +++ b/internal/pgbackrest/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -24,11 +13,11 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -116,22 +105,15 @@ func AddConfigToInstancePod( {Key: ConfigHashKey, Path: ConfigHashKey}, } - // As the cluster transitions from having a repository host to having none, - // PostgreSQL instances that have not rolled out expect to mount client - // certificates. 
Specify those files are optional so the configuration - // volumes stay valid and Kubernetes propagates their contents to those pods. secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} secret.Secret.Name = naming.PGBackRestSecret(cluster).Name - secret.Secret.Optional = initialize.Bool(true) - if DedicatedRepoHostEnabled(cluster) { - configmap.ConfigMap.Items = append( - configmap.ConfigMap.Items, corev1.KeyToPath{ - Key: serverConfigMapKey, - Path: serverConfigProjectionPath, - }) - secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) - } + configmap.ConfigMap.Items = append( + configmap.ConfigMap.Items, corev1.KeyToPath{ + Key: serverConfigMapKey, + Path: serverConfigProjectionPath, + }) + secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) // Start with a copy of projections specified in the cluster. Items later in // the list take precedence over earlier items (that is, last write wins). @@ -289,6 +271,7 @@ func addConfigVolumeAndMounts( // addServerContainerAndVolume adds the TLS server container and certificate // projections to pod. Any PostgreSQL data and WAL volumes in pod are also mounted. func addServerContainerAndVolume( + ctx context.Context, cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, certificates []corev1.VolumeProjection, resources *corev1.ResourceRequirements, ) { @@ -332,7 +315,7 @@ func addServerContainerAndVolume( postgres.DataVolumeMount().Name: postgres.DataVolumeMount(), postgres.WALVolumeMount().Name: postgres.WALVolumeMount(), } - if util.DefaultMutableFeatureGate.Enabled(util.TablespaceVolumes) { + if feature.Enabled(ctx, feature.TablespaceVolumes) { for _, instance := range cluster.Spec.InstanceSets { for _, vol := range instance.TablespaceVolumes { tablespaceVolumeMount := postgres.TablespaceVolumeMount(vol.Name) @@ -370,6 +353,7 @@ func addServerContainerAndVolume( // AddServerToInstancePod adds the TLS server container and volume to pod for // an instance of cluster. Any PostgreSQL volumes must already be in pod. func AddServerToInstancePod( + ctx context.Context, cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, instanceCertificateSecretName string, ) { @@ -387,12 +371,13 @@ func AddServerToInstancePod( resources = sidecars.PGBackRest.Resources } - addServerContainerAndVolume(cluster, pod, certificates, resources) + addServerContainerAndVolume(ctx, cluster, pod, certificates, resources) } // AddServerToRepoPod adds the TLS server container and volume to pod for // the dedicated repository host of cluster. func AddServerToRepoPod( + ctx context.Context, cluster *v1beta1.PostgresCluster, pod *corev1.PodSpec, ) { certificates := []corev1.VolumeProjection{{ @@ -409,7 +394,7 @@ func AddServerToRepoPod( resources = &cluster.Spec.Backups.PGBackRest.RepoHost.Resources } - addServerContainerAndVolume(cluster, pod, certificates, resources) + addServerContainerAndVolume(ctx, cluster, pod, certificates, resources) } // InstanceCertificates populates the shared Secret with certificates needed to run pgBackRest. 
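The context.Context added to AddServerToInstancePod and AddServerToRepoPod carries feature-gate state rather than cancellation. A rough sketch of how a caller builds that context, mirroring the tests further down in this diff (the cluster, pod, and secret name are placeholders, imports omitted):

	gate := feature.NewGate()
	// Hypothetical: enable the TablespaceVolumes gate for this call; error ignored in this fragment.
	_ = gate.SetFromMap(map[string]bool{feature.TablespaceVolumes: true})
	ctx := feature.NewContext(context.Background(), gate)
	pgbackrest.AddServerToInstancePod(ctx, cluster, podSpec, "hippo-instance-cert")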
@@ -421,15 +406,13 @@ func InstanceCertificates(ctx context.Context, ) error { var err error - if DedicatedRepoHostEnabled(inCluster) { - initialize.ByteMap(&outInstanceCertificates.Data) + initialize.Map(&outInstanceCertificates.Data) - if err == nil { - outInstanceCertificates.Data[certInstanceSecretKey], err = certFile(inDNS) - } - if err == nil { - outInstanceCertificates.Data[certInstancePrivateKeySecretKey], err = certFile(inDNSKey) - } + if err == nil { + outInstanceCertificates.Data[certInstanceSecretKey], err = certFile(inDNS) + } + if err == nil { + outInstanceCertificates.Data[certInstancePrivateKeySecretKey], err = certFile(inDNSKey) } return err @@ -490,7 +473,7 @@ func RestoreConfig( sourceConfigMap, targetConfigMap *corev1.ConfigMap, sourceSecret, targetSecret *corev1.Secret, ) { - initialize.StringMap(&targetConfigMap.Data) + initialize.Map(&targetConfigMap.Data) // Use the repository definitions from the source cluster. // @@ -502,7 +485,7 @@ func RestoreConfig( targetConfigMap.Data[CMInstanceKey] = sourceConfigMap.Data[CMInstanceKey] if sourceSecret != nil && targetSecret != nil { - initialize.ByteMap(&targetSecret.Data) + initialize.Map(&targetSecret.Data) // - https://golang.org/issue/45038 bytesClone := func(b []byte) []byte { return append([]byte(nil), b...) } @@ -526,7 +509,7 @@ func Secret(ctx context.Context, // Save the CA and generate a TLS client certificate for the entire cluster. if inRepoHost != nil { - initialize.ByteMap(&outSecret.Data) + initialize.Map(&outSecret.Data) // The server verifies its "tls-server-auth" option contains the common // name (CN) of the certificate presented by a client. The entire diff --git a/internal/pgbackrest/reconcile_test.go b/internal/pgbackrest/reconcile_test.go index 257529fc0c..4957d58f7b 100644 --- a/internal/pgbackrest/reconcile_test.go +++ b/internal/pgbackrest/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbackrest @@ -28,9 +17,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pki" - "github.com/crunchydata/postgres-operator/internal/util" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -197,7 +187,7 @@ func TestAddConfigToInstancePod(t *testing.T) { assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // Only database and pgBackRest containers have mounts. 
- assert.Assert(t, marshalMatches(result.Containers, ` + assert.Assert(t, cmp.MarshalMatches(result.Containers, ` - name: database resources: {} volumeMounts: @@ -229,7 +219,7 @@ func TestAddConfigToInstancePod(t *testing.T) { alwaysExpect(t, out) // Instance configuration files after custom projections. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -241,7 +231,19 @@ func TestAddConfigToInstancePod(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest `)) }) @@ -253,8 +255,8 @@ func TestAddConfigToInstancePod(t *testing.T) { AddConfigToInstancePod(cluster, out) alwaysExpect(t, out) - // Instance configuration files but no certificates. - assert.Assert(t, marshalMatches(out.Volumes, ` + // Instance configuration and certificates. + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -264,7 +266,19 @@ func TestAddConfigToInstancePod(t *testing.T) { path: pgbackrest_instance.conf - key: config-hash path: config-hash + - key: pgbackrest-server.conf + path: ~postgres-operator_server.conf name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest `)) }) @@ -282,7 +296,7 @@ func TestAddConfigToInstancePod(t *testing.T) { alwaysExpect(t, out) // Instance configuration files, server config, and optional client certificates. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -305,7 +319,6 @@ func TestAddConfigToInstancePod(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest - optional: true `)) }) } @@ -327,7 +340,7 @@ func TestAddConfigToRepoPod(t *testing.T) { assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // Only pgBackRest containers have mounts. - assert.Assert(t, marshalMatches(result.Containers, ` + assert.Assert(t, cmp.MarshalMatches(result.Containers, ` - name: other resources: {} - name: pgbackrest @@ -354,7 +367,7 @@ func TestAddConfigToRepoPod(t *testing.T) { // Repository configuration files, server config, and client certificates // after custom projections. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -400,7 +413,7 @@ func TestAddConfigToRestorePod(t *testing.T) { assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // Only pgBackRest containers have mounts. - assert.Assert(t, marshalMatches(result.Containers, ` + assert.Assert(t, cmp.MarshalMatches(result.Containers, ` - name: other resources: {} - name: pgbackrest @@ -435,7 +448,7 @@ func TestAddConfigToRestorePod(t *testing.T) { // Instance configuration files and optional client certificates // after custom projections. 
- assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -479,7 +492,7 @@ func TestAddConfigToRestorePod(t *testing.T) { // Instance configuration files and optional client certificates // after custom projections. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-config projected: sources: @@ -521,7 +534,7 @@ func TestAddConfigToRestorePod(t *testing.T) { // Instance configuration files and optional configuration files // after custom projections. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: postgres-config projected: sources: @@ -551,6 +564,9 @@ func TestAddConfigToRestorePod(t *testing.T) { } func TestAddServerToInstancePod(t *testing.T) { + t.Parallel() + + ctx := context.Background() cluster := v1beta1.PostgresCluster{} cluster.Name = "hippo" cluster.Default() @@ -568,7 +584,6 @@ func TestAddServerToInstancePod(t *testing.T) { } t.Run("CustomResources", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.TablespaceVolumes+"=false"))) cluster := cluster.DeepCopy() cluster.Spec.Backups.PGBackRest.Sidecars = &v1beta1.PGBackRestSidecars{ PGBackRest: &v1beta1.Sidecar{ @@ -588,14 +603,14 @@ func TestAddServerToInstancePod(t *testing.T) { } out := pod.DeepCopy() - AddServerToInstancePod(cluster, out, "instance-secret-name") + AddServerToInstancePod(ctx, cluster, out, "instance-secret-name") // Only Containers and Volumes fields have changed. assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // The TLS server is added while other containers are untouched. // It has PostgreSQL volumes mounted while other volumes are ignored. - assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: database resources: {} - name: other @@ -636,21 +651,21 @@ func TestAddServerToInstancePod(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -681,7 +696,7 @@ func TestAddServerToInstancePod(t *testing.T) { // The server certificate comes from the instance Secret. // Other volumes are untouched. 
- assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: other - name: postgres-data - name: postgres-wal @@ -700,7 +715,12 @@ func TestAddServerToInstancePod(t *testing.T) { }) t.Run("AddTablespaces", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.TablespaceVolumes+"=true"))) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx := feature.NewContext(ctx, gate) + clusterWithTablespaces := cluster.DeepCopy() clusterWithTablespaces.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{ { @@ -713,11 +733,11 @@ func TestAddServerToInstancePod(t *testing.T) { out := pod.DeepCopy() out.Volumes = append(out.Volumes, corev1.Volume{Name: "tablespace-trial"}, corev1.Volume{Name: "tablespace-castle"}) - AddServerToInstancePod(clusterWithTablespaces, out, "instance-secret-name") + AddServerToInstancePod(ctx, clusterWithTablespaces, out, "instance-secret-name") // Only Containers and Volumes fields have changed. assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) - assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: database resources: {} - name: other @@ -760,21 +780,21 @@ func TestAddServerToInstancePod(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -804,6 +824,9 @@ func TestAddServerToInstancePod(t *testing.T) { } func TestAddServerToRepoPod(t *testing.T) { + t.Parallel() + + ctx := context.Background() cluster := v1beta1.PostgresCluster{} cluster.Name = "hippo" cluster.Default() @@ -834,13 +857,13 @@ func TestAddServerToRepoPod(t *testing.T) { } out := pod.DeepCopy() - AddServerToRepoPod(cluster, out) + AddServerToRepoPod(ctx, cluster, out) // Only Containers and Volumes fields have changed. assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) // The TLS server is added while other containers are untouched. 
- assert.Assert(t, marshalMatches(out.Containers, ` + assert.Assert(t, cmp.MarshalMatches(out.Containers, ` - name: other resources: {} - command: @@ -875,21 +898,21 @@ func TestAddServerToRepoPod(t *testing.T) { - -- - |- monitor() { - exec {fd}<> <(:) + exec {fd}<> <(:||:) until read -r -t 5 -u "${fd}"; do if - [ "${filename}" -nt "/proc/self/fd/${fd}" ] && + [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --dereference --format='Loaded configuration dated %y' "${filename}" elif - { [ "${directory}" -nt "/proc/self/fd/${fd}" ] || - [ "${authority}" -nt "/proc/self/fd/${fd}" ] + { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || + [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] } && pkill -HUP --exact --parent=0 pgbackrest then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi done @@ -919,7 +942,7 @@ func TestAddServerToRepoPod(t *testing.T) { `)) // The server certificate comes from the pgBackRest Secret. - assert.Assert(t, marshalMatches(out.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` - name: pgbackrest-server projected: sources: diff --git a/internal/pgbackrest/restore.md b/internal/pgbackrest/restore.md index dc1b500811..8828576921 100644 --- a/internal/pgbackrest/restore.md +++ b/internal/pgbackrest/restore.md @@ -1,16 +1,7 @@ ## Target Action diff --git a/internal/pgbackrest/tls-server.md b/internal/pgbackrest/tls-server.md index 6d58d85f96..b572cc1ea4 100644 --- a/internal/pgbackrest/tls-server.md +++ b/internal/pgbackrest/tls-server.md @@ -1,16 +1,7 @@ # pgBackRest TLS Server @@ -21,8 +12,10 @@ on different pods: - [dedicated repository host](https://pgbackrest.org/user-guide.html#repo-host) - [backup from standby](https://pgbackrest.org/user-guide.html#standby-backup) -When a PostgresCluster is configured to store backups on a PVC, we start a dedicated -repository host to make that PVC available to all PostgreSQL instances in the cluster. +When a PostgresCluster is configured to store backups on a PVC, the dedicated +repository host is used to make that PVC available to all PostgreSQL instances +in the cluster. Regardless of whether the repo host has a defined PVC, it +functions as the server for the pgBackRest clients that run on the Instances. The repository host runs a `pgbackrest` server that is secured through TLS and [certificates][]. When performing backups, it connects to `pgbackrest` servers diff --git a/internal/pgbackrest/util.go b/internal/pgbackrest/util.go index 2c35e2a432..4fc2266c56 100644 --- a/internal/pgbackrest/util.go +++ b/internal/pgbackrest/util.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+//
+// SPDX-License-Identifier: Apache-2.0

 package pgbackrest

@@ -30,9 +19,9 @@ import (
 // multi-repository solution implemented within pgBackRest
 const maxPGBackrestRepos = 4

-// DedicatedRepoHostEnabled determines whether not a pgBackRest dedicated repository host is
-// enabled according to the provided PostgresCluster
-func DedicatedRepoHostEnabled(postgresCluster *v1beta1.PostgresCluster) bool {
+// RepoHostVolumeDefined determines whether or not at least one pgBackRest dedicated
+// repository host volume has been defined in the PostgresCluster manifest.
+func RepoHostVolumeDefined(postgresCluster *v1beta1.PostgresCluster) bool {
 	for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos {
 		if repo.Volume != nil {
 			return true
diff --git a/internal/pgbackrest/util_test.go b/internal/pgbackrest/util_test.go
index ca32af55f3..eb0f4dec29 100644
--- a/internal/pgbackrest/util_test.go
+++ b/internal/pgbackrest/util_test.go
@@ -1,17 +1,6 @@
-/*
- Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0

 package pgbackrest

diff --git a/internal/pgbouncer/assertions_test.go b/internal/pgbouncer/assertions_test.go
deleted file mode 100644
index 237043239f..0000000000
--- a/internal/pgbouncer/assertions_test.go
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package pgbouncer
-
-import (
-	"github.com/crunchydata/postgres-operator/internal/testing/cmp"
-)
-
-// marshalMatches converts actual to YAML and compares that to expected.
-func marshalMatches(actual interface{}, expected string) cmp.Comparison {
-	return cmp.MarshalMatches(actual, expected)
-}
diff --git a/internal/pgbouncer/certificates.go b/internal/pgbouncer/certificates.go
index 4fb0c4926e..31f91c503a 100644
--- a/internal/pgbouncer/certificates.go
+++ b/internal/pgbouncer/certificates.go
@@ -1,17 +1,6 @@
-/*
- Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer diff --git a/internal/pgbouncer/certificates_test.go b/internal/pgbouncer/certificates_test.go index 04a8f9708e..5955c3de9c 100644 --- a/internal/pgbouncer/certificates_test.go +++ b/internal/pgbouncer/certificates_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -20,6 +9,8 @@ import ( "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) func TestBackendAuthority(t *testing.T) { @@ -27,7 +18,7 @@ func TestBackendAuthority(t *testing.T) { projection := &corev1.SecretProjection{ LocalObjectReference: corev1.LocalObjectReference{Name: "some-name"}, } - assert.Assert(t, marshalMatches(backendAuthority(projection), ` + assert.Assert(t, cmp.MarshalMatches(backendAuthority(projection), ` secret: items: - key: ca.crt @@ -40,7 +31,7 @@ secret: {Key: "some-crt-key", Path: "tls.crt"}, {Key: "some-ca-key", Path: "ca.crt"}, } - assert.Assert(t, marshalMatches(backendAuthority(projection), ` + assert.Assert(t, cmp.MarshalMatches(backendAuthority(projection), ` secret: items: - key: some-ca-key @@ -54,7 +45,7 @@ func TestFrontendCertificate(t *testing.T) { secret.Name = "op-secret" t.Run("Generated", func(t *testing.T) { - assert.Assert(t, marshalMatches(frontendCertificate(nil, secret), ` + assert.Assert(t, cmp.MarshalMatches(frontendCertificate(nil, secret), ` secret: items: - key: pgbouncer-frontend.ca-roots @@ -72,7 +63,7 @@ secret: custom.Name = "some-other" // No items; assume Key matches Path. - assert.Assert(t, marshalMatches(frontendCertificate(custom, secret), ` + assert.Assert(t, cmp.MarshalMatches(frontendCertificate(custom, secret), ` secret: items: - key: ca.crt @@ -91,7 +82,7 @@ secret: {Key: "some-cert-key", Path: "tls.crt"}, {Key: "some-key-key", Path: "tls.key"}, } - assert.Assert(t, marshalMatches(frontendCertificate(custom, secret), ` + assert.Assert(t, cmp.MarshalMatches(frontendCertificate(custom, secret), ` secret: items: - key: some-ca-key diff --git a/internal/pgbouncer/config.go b/internal/pgbouncer/config.go index 03da18ed12..a203144817 100644 --- a/internal/pgbouncer/config.go +++ b/internal/pgbouncer/config.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -250,11 +239,11 @@ func reloadCommand(name string) []string { // mtimes. // - https://unix.stackexchange.com/a/407383 const script = ` -exec {fd}<> <(:) -while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && pkill -HUP --exact pgbouncer +exec {fd}<> <(:||:) +while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded configuration dated %y' "${directory}" fi done diff --git a/internal/pgbouncer/config.md b/internal/pgbouncer/config.md index 8c6ee87012..abfec12518 100644 --- a/internal/pgbouncer/config.md +++ b/internal/pgbouncer/config.md @@ -1,16 +1,7 @@ PgBouncer is configured through INI files. It will reload these files when diff --git a/internal/pgbouncer/config_test.go b/internal/pgbouncer/config_test.go index 64973c3528..7a96da571c 100644 --- a/internal/pgbouncer/config_test.go +++ b/internal/pgbouncer/config_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -27,6 +16,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -150,7 +140,7 @@ func TestPodConfigFiles(t *testing.T) { t.Run("Default", func(t *testing.T) { projections := podConfigFiles(config, configmap, secret) - assert.Assert(t, marshalMatches(projections, ` + assert.Assert(t, cmp.MarshalMatches(projections, ` - configMap: items: - key: pgbouncer-empty @@ -183,7 +173,7 @@ func TestPodConfigFiles(t *testing.T) { } projections := podConfigFiles(config, configmap, secret) - assert.Assert(t, marshalMatches(projections, ` + assert.Assert(t, cmp.MarshalMatches(projections, ` - configMap: items: - key: pgbouncer-empty diff --git a/internal/pgbouncer/postgres.go b/internal/pgbouncer/postgres.go index 9fbf00f98b..cbc2e29916 100644 --- a/internal/pgbouncer/postgres.go +++ b/internal/pgbouncer/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer diff --git a/internal/pgbouncer/postgres_test.go b/internal/pgbouncer/postgres_test.go index f90c60df71..f2ce419753 100644 --- a/internal/pgbouncer/postgres_test.go +++ b/internal/pgbouncer/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer diff --git a/internal/pgbouncer/reconcile.go b/internal/pgbouncer/reconcile.go index 1d793cdbb4..999d6524a5 100644 --- a/internal/pgbouncer/reconcile.go +++ b/internal/pgbouncer/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -23,11 +12,11 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -41,7 +30,7 @@ func ConfigMap( return } - initialize.StringMap(&outConfigMap.Data) + initialize.Map(&outConfigMap.Data) outConfigMap.Data[emptyConfigMapKey] = "" outConfigMap.Data[iniFileConfigMapKey] = clusterINI(inCluster) @@ -61,7 +50,7 @@ func Secret(ctx context.Context, } var err error - initialize.ByteMap(&outSecret.Data) + initialize.Map(&outSecret.Data) // Use the existing password and verifier. Generate both when either is missing. // NOTE(cbandy): We don't have a function to compare a plaintext password @@ -114,6 +103,7 @@ func Secret(ctx context.Context, // Pod populates a PodSpec with the container and volumes needed to run PgBouncer. func Pod( + ctx context.Context, inCluster *v1beta1.PostgresCluster, inConfigMap *corev1.ConfigMap, inPostgreSQLCertificate *corev1.SecretProjection, @@ -191,7 +181,7 @@ func Pod( // If the PGBouncerSidecars feature gate is enabled and custom pgBouncer // sidecars are defined, add the defined container to the Pod. - if util.DefaultMutableFeatureGate.Enabled(util.PGBouncerSidecars) && + if feature.Enabled(ctx, feature.PGBouncerSidecars) && inCluster.Spec.Proxy.PGBouncer.Containers != nil { outPod.Containers = append(outPod.Containers, inCluster.Spec.Proxy.PGBouncer.Containers...) } diff --git a/internal/pgbouncer/reconcile_test.go b/internal/pgbouncer/reconcile_test.go index 9747e8cdc1..a53de8cf64 100644 --- a/internal/pgbouncer/reconcile_test.go +++ b/internal/pgbouncer/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgbouncer @@ -19,14 +8,15 @@ import ( "context" "testing" - "github.com/google/go-cmp/cmp" + gocmp "github.com/google/go-cmp/cmp" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" - "github.com/crunchydata/postgres-operator/internal/util" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -103,8 +93,8 @@ func TestSecret(t *testing.T) { func TestPod(t *testing.T) { t.Parallel() - // Initialize the feature gate - assert.NilError(t, util.AddAndSetFeatureGates("")) + features := feature.NewGate() + ctx := feature.NewContext(context.Background(), features) cluster := new(v1beta1.PostgresCluster) configMap := new(corev1.ConfigMap) @@ -112,7 +102,7 @@ func TestPod(t *testing.T) { secret := new(corev1.Secret) pod := new(corev1.PodSpec) - call := func() { Pod(cluster, configMap, primaryCertificate, secret, pod) } + call := func() { Pod(ctx, cluster, configMap, primaryCertificate, secret, pod) } t.Run("Disabled", func(t *testing.T) { before := pod.DeepCopy() @@ -129,7 +119,7 @@ func TestPod(t *testing.T) { call() - assert.Assert(t, marshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - command: - pgbouncer @@ -160,11 +150,11 @@ containers: - -- - |- monitor() { - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && pkill -HUP --exact pgbouncer + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded configuration dated %y' "${directory}" fi done @@ -239,7 +229,7 @@ volumes: call() - assert.Assert(t, marshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - command: - pgbouncer @@ -274,11 +264,11 @@ containers: - -- - |- monitor() { - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && pkill -HUP --exact pgbouncer + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded configuration dated %y' "${directory}" fi done @@ -349,7 +339,7 @@ volumes: call() - assert.Assert(t, marshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - command: - pgbouncer @@ -384,11 +374,11 @@ containers: - -- - |- monitor() { - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && pkill -HUP --exact pgbouncer + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && pkill -HUP --exact pgbouncer then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded configuration dated %y' "${directory}" fi done @@ -457,7 +447,9 @@ volumes: }) t.Run("SidecarEnabled", func(t *testing.T) { - assert.NilError(t, 
util.AddAndSetFeatureGates(string(util.PGBouncerSidecars+"=true"))) + assert.NilError(t, features.SetFromMap(map[string]bool{ + feature.PGBouncerSidecars: true, + })) call() assert.Equal(t, len(pod.Containers), 3, "expected 3 containers in Pod, got %d", len(pod.Containers)) @@ -499,6 +491,6 @@ func TestPostgreSQL(t *testing.T) { Mandatory: postgresqlHBAs(), }, // postgres.HostBasedAuthentication has unexported fields. Call String() to compare. - cmp.Transformer("", postgres.HostBasedAuthentication.String)) + gocmp.Transformer("", postgres.HostBasedAuthentication.String)) }) } diff --git a/internal/pgmonitor/exporter.go b/internal/pgmonitor/exporter.go index d55e363d19..9d7a1fc3c6 100644 --- a/internal/pgmonitor/exporter.go +++ b/internal/pgmonitor/exporter.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor @@ -43,7 +32,8 @@ const ( // postgres_exporter command flags var ( - ExporterWebConfigFileFlag = "--web.config.file=/web-config/web-config.yml" + ExporterWebConfigFileFlag = "--web.config.file=/web-config/web-config.yml" + ExporterDeactivateStatBGWriterFlag = "--no-collector.stat_bgwriter" ) // Defaults for certain values used in queries.yml @@ -160,11 +150,11 @@ func ExporterStartCommand(builtinCollectors bool, commandFlags ...string) []stri // Create a file descriptor with a no-op process that will not get // cleaned up - `exec {fd}<> <(:)`, + `exec {fd}<> <(:||:)`, // Set up loop. Use read's timeout setting instead of sleep, // which uses up a lot of memory - `while read -r -t 3 -u "${fd}" || true; do`, + `while read -r -t 3 -u "${fd}" ||:; do`, // If either directories' modify time is newer than our file descriptor's, // something must have changed, so kill the postgres_exporter @@ -174,14 +164,14 @@ func ExporterStartCommand(builtinCollectors bool, commandFlags ...string) []stri // When something changes we want to get rid of the old file descriptor, get a fresh one // and restart the loop ` echo "Something changed..."`, - ` exec {fd}>&- && exec {fd}<> <(:)`, + ` exec {fd}>&- && exec {fd}<> <(:||:)`, ` stat --format='Latest queries file dated %y' "/conf"`, ` stat --format='Latest password file dated %y' "/opt/crunchy/password"`, ` fi`, // If postgres_exporter is not running, restart it // Use the recorded pid as a proxy for checking if postgres_exporter is running - ` if [ ! -e /proc/$(head -1 ${POSTGRES_EXPORTER_PIDFILE?}) ] ; then`, + ` if [[ ! -e /proc/$(head -1 ${POSTGRES_EXPORTER_PIDFILE?}) ]] ; then`, ` start_postgres_exporter`, ` fi`, `done`, diff --git a/internal/pgmonitor/exporter_test.go b/internal/pgmonitor/exporter_test.go index f65272ca87..5ba14e0993 100644 --- a/internal/pgmonitor/exporter_test.go +++ b/internal/pgmonitor/exporter_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor diff --git a/internal/pgmonitor/postgres.go b/internal/pgmonitor/postgres.go index d433fc08e0..8aed164a18 100644 --- a/internal/pgmonitor/postgres.go +++ b/internal/pgmonitor/postgres.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor diff --git a/internal/pgmonitor/postgres_test.go b/internal/pgmonitor/postgres_test.go index d4caaefd68..655fa936ae 100644 --- a/internal/pgmonitor/postgres_test.go +++ b/internal/pgmonitor/postgres_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor diff --git a/internal/pgmonitor/util.go b/internal/pgmonitor/util.go index 410594eea4..f5606ccd08 100644 --- a/internal/pgmonitor/util.go +++ b/internal/pgmonitor/util.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor diff --git a/internal/pgmonitor/util_test.go b/internal/pgmonitor/util_test.go index 55c6bd0fcf..8d16d74bae 100644 --- a/internal/pgmonitor/util_test.go +++ b/internal/pgmonitor/util_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pgmonitor diff --git a/internal/pki/common.go b/internal/pki/common.go index 13c573cd2b..fbe9421f8b 100644 --- a/internal/pki/common.go +++ b/internal/pki/common.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki diff --git a/internal/pki/doc.go b/internal/pki/doc.go index bfbe34e3c1..71f8c0a1bc 100644 --- a/internal/pki/doc.go +++ b/internal/pki/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package pki provides types and functions to support the public key // infrastructure of the Postgres Operator. It enforces a two layer system diff --git a/internal/pki/encoding.go b/internal/pki/encoding.go index b7ebe4eed1..2d2cd851e3 100644 --- a/internal/pki/encoding.go +++ b/internal/pki/encoding.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki diff --git a/internal/pki/encoding_test.go b/internal/pki/encoding_test.go index dc116a2947..cdf7c0de5a 100644 --- a/internal/pki/encoding_test.go +++ b/internal/pki/encoding_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki diff --git a/internal/pki/pki.go b/internal/pki/pki.go index 9f923bb9f7..7048810654 100644 --- a/internal/pki/pki.go +++ b/internal/pki/pki.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki diff --git a/internal/pki/pki_test.go b/internal/pki/pki_test.go index 1905c417ae..cd13896450 100644 --- a/internal/pki/pki_test.go +++ b/internal/pki/pki_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package pki diff --git a/internal/postgis/postgis.go b/internal/postgis/postgis.go index aaf88f6a8e..f54da0dd93 100644 --- a/internal/postgis/postgis.go +++ b/internal/postgis/postgis.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgis diff --git a/internal/postgis/postgis_test.go b/internal/postgis/postgis_test.go index 97cd338daa..5f604abc90 100644 --- a/internal/postgis/postgis_test.go +++ b/internal/postgis/postgis_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgis diff --git a/internal/postgres/assertions_test.go b/internal/postgres/assertions_test.go deleted file mode 100644 index 79104c14f5..0000000000 --- a/internal/postgres/assertions_test.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package postgres - -import ( - "github.com/crunchydata/postgres-operator/internal/testing/cmp" -) - -func marshalMatches(actual interface{}, expected string) cmp.Comparison { - return cmp.MarshalMatches(actual, expected) -} diff --git a/internal/postgres/config.go b/internal/postgres/config.go index 8b13fbbce1..ce1acde3fb 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -1,29 +1,19 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres import ( + "context" "fmt" "strings" corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -55,7 +45,7 @@ recreate() ( safelink() ( local desired="$1" name="$2" current current=$(realpath "${name}") - if [ "${current}" = "${desired}" ]; then return; fi + if [[ "${current}" == "${desired}" ]]; then return; fi set -x; mv --no-target-directory "${current}" "${desired}" ln --no-dereference --force --symbolic "${desired}" "${name}" ) @@ -102,12 +92,17 @@ func DataDirectory(cluster *v1beta1.PostgresCluster) string { func WALDirectory( cluster *v1beta1.PostgresCluster, instance *v1beta1.PostgresInstanceSetSpec, ) string { - // When no WAL volume is specified, store WAL files on the main data volume. - walStorage := dataMountPath + return fmt.Sprintf("%s/pg%d_wal", WALStorage(instance), cluster.Spec.PostgresVersion) +} + +// WALStorage returns the absolute path to the disk where an instance stores its +// WAL files. Use [WALDirectory] for the exact directory that Postgres uses. +func WALStorage(instance *v1beta1.PostgresInstanceSetSpec) string { if instance.WALVolumeClaimSpec != nil { - walStorage = walMountPath + return walMountPath } - return fmt.Sprintf("%s/pg%d_wal", walStorage, cluster.Spec.PostgresVersion) + // When no WAL volume is specified, store WAL files on the main data volume. + return dataMountPath } // Environment returns the environment variables required to invoke PostgreSQL @@ -142,6 +137,19 @@ func Environment(cluster *v1beta1.PostgresCluster) []corev1.EnvVar { Name: "KRB5RCACHEDIR", Value: "/tmp", }, + // This allows a custom CA certificate to be mounted for Postgres LDAP + // authentication via spec.config.files. + // - https://wiki.postgresql.org/wiki/LDAP_Authentication_against_AD + // + // When setting the TLS_CACERT for LDAP as an environment variable, 'LDAP' + // must be appended as a prefix. + // - https://www.openldap.org/software/man.cgi?query=ldap.conf + // + // Testing with LDAPTLS_CACERTDIR did not work as expected during testing. + { + Name: "LDAPTLS_CACERT", + Value: configMountPath + "/ldap/ca.crt", + }, } } @@ -172,16 +180,39 @@ func reloadCommand(name string) []string { // mtimes. // - https://unix.stackexchange.com/a/407383 script := fmt.Sprintf(` +# Parameters for curl when managing autogrow annotation. +APISERVER="https://kubernetes.default.svc" +SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount" +NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) +TOKEN=$(cat ${SERVICEACCOUNT}/token) +CACERT=${SERVICEACCOUNT}/ca.crt + declare -r directory=%q -exec {fd}<> <(:) -while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && +exec {fd}<> <(:||:) +while read -r -t 5 -u "${fd}" ||:; do + # Manage replication certificate. + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && install -D --mode=0600 -t %q "${directory}"/{%s,%s,%s} && pkill -HUP --exact --parent=1 postgres then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %%y' "${directory}" fi + + # Manage autogrow annotation. + # Return size in Mebibytes. 
+ size=$(df --human-readable --block-size=M /pgdata | awk 'FNR == 2 {print $2}') + use=$(df --human-readable /pgdata | awk 'FNR == 2 {print $5}') + sizeInt="${size//M/}" + # Use the sed punctuation class, because the shell will not accept the percent sign in an expansion. + useInt=$(echo $use | sed 's/[[:punct:]]//g') + triggerExpansion="$((useInt > 75))" + if [ $triggerExpansion -eq 1 ]; then + newSize="$(((sizeInt / 2)+sizeInt))" + newSizeMi="${newSize}Mi" + d='[{"op": "add", "path": "/metadata/annotations/suggested-pgdata-pvc-size", "value": "'"$newSizeMi"'"}]' + curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + fi done `, naming.CertMountPath, @@ -201,6 +232,7 @@ done // startupCommand returns an entrypoint that prepares the filesystem for // PostgreSQL. func startupCommand( + ctx context.Context, cluster *v1beta1.PostgresCluster, instance *v1beta1.PostgresInstanceSetSpec, ) []string { version := fmt.Sprint(cluster.Spec.PostgresVersion) @@ -209,7 +241,7 @@ func startupCommand( // If the user requests tablespaces, we want to make sure the directories exist with the // correct owner and permissions. tablespaceCmd := "" - if util.DefaultMutableFeatureGate.Enabled(util.TablespaceVolumes) { + if feature.Enabled(ctx, feature.TablespaceVolumes) { // This command checks if a dir exists and if not, creates it; // if the dir does exist, then we `recreate` it to make sure the owner is correct; // if the dir exists with the wrong owner and is not writeable, we error. @@ -280,27 +312,32 @@ chmod +x /tmp/pg_rewind_tde.sh // Log the effective user ID and all the group IDs. `echo Initializing ...`, - `results 'uid' "$(id -u)" 'gid' "$(id -G)"`, + `results 'uid' "$(id -u ||:)" 'gid' "$(id -G ||:)"`, + + // The pgbackrest spool path should be co-located with wal. If a wal volume exists, symlink the spool-path to it. + `if [[ "${pgwal_directory}" == *"pgwal/"* ]] && [[ ! -d "/pgwal/pgbackrest-spool" ]];then rm -rf "/pgdata/pgbackrest-spool" && mkdir -p "/pgwal/pgbackrest-spool" && ln --force --symbolic "/pgwal/pgbackrest-spool" "/pgdata/pgbackrest-spool";fi`, + // When a pgwal volume is removed, the symlink will be broken; force pgbackrest to recreate spool-path. + `if [[ ! -e "/pgdata/pgbackrest-spool" ]];then rm -rf /pgdata/pgbackrest-spool;fi`, // Abort when the PostgreSQL version installed in the image does not // match the cluster spec. - `results 'postgres path' "$(command -v postgres)"`, - `results 'postgres version' "${postgres_version:=$(postgres --version)}"`, + `results 'postgres path' "$(command -v postgres ||:)"`, + `results 'postgres version' "${postgres_version:=$(postgres --version ||:)}"`, `[[ "${postgres_version}" =~ ") ${expected_major_version}"($|[^0-9]) ]] ||`, `halt Expected PostgreSQL version "${expected_major_version}"`, // Abort when the configured data directory is not $PGDATA. 
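Editorial note on the autogrow addition above: when /pgdata usage passes 75 percent, the shell loop suggests a new PVC size of roughly 1.5 times the current size and PATCHes it onto the pod as the suggested-pgdata-pvc-size annotation. The integer arithmetic is easy to miss in shell, so here is a minimal standalone Go sketch of the same calculation; the package, function, and parameter names are illustrative only and are not part of the operator.

package main

import "fmt"

// suggestedPGDataSize mirrors the shell arithmetic in the reload loop:
// when usage is above 75%, suggest growing the volume by half its current
// size. sizeMi is the current size of /pgdata in mebibytes, usedPercent
// its current usage percentage.
func suggestedPGDataSize(sizeMi, usedPercent int) (string, bool) {
	if usedPercent <= 75 {
		return "", false
	}
	newSize := sizeMi/2 + sizeMi // integer math, same as $(((sizeInt / 2)+sizeInt))
	return fmt.Sprintf("%dMi", newSize), true
}

func main() {
	if size, ok := suggestedPGDataSize(1000, 76); ok {
		// The shell script writes this value to the pod annotation
		// "suggested-pgdata-pvc-size"; here we only print it.
		fmt.Println(size) // 1500Mi
	}
}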
// - https://www.postgresql.org/docs/current/runtime-config-file-locations.html `results 'config directory' "${PGDATA:?}"`, - `postgres_data_directory=$([ -d "${PGDATA}" ] && postgres -C data_directory || echo "${PGDATA}")`, + `postgres_data_directory=$([[ -d "${PGDATA}" ]] && postgres -C data_directory || echo "${PGDATA}")`, `results 'data directory' "${postgres_data_directory}"`, `[[ "${postgres_data_directory}" == "${PGDATA}" ]] ||`, `halt Expected matching config and data directories`, // Determine if the data directory has been prepared for bootstrapping the cluster `bootstrap_dir="${postgres_data_directory}_bootstrap"`, - `[ -d "${bootstrap_dir}" ] && results 'bootstrap directory' "${bootstrap_dir}"`, - `[ -d "${bootstrap_dir}" ] && postgres_data_directory="${bootstrap_dir}"`, + `[[ -d "${bootstrap_dir}" ]] && results 'bootstrap directory' "${bootstrap_dir}"`, + `[[ -d "${bootstrap_dir}" ]] && postgres_data_directory="${bootstrap_dir}"`, // PostgreSQL requires its directory to be writable by only itself. // Pod "securityContext.fsGroup" sets g+w on directories for *some* @@ -350,7 +387,7 @@ chmod +x /tmp/pg_rewind_tde.sh tablespaceCmd, // When the data directory is empty, there's nothing more to do. - `[ -f "${postgres_data_directory}/PG_VERSION" ] || exit 0`, + `[[ -f "${postgres_data_directory}/PG_VERSION" ]] || exit 0`, // Abort when the data directory is not empty and its version does not // match the cluster spec. @@ -374,7 +411,7 @@ chmod +x /tmp/pg_rewind_tde.sh // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/initdb/initdb.c;hb=REL_13_0#l2718 // - https://git.postgresql.org/gitweb/?p=postgresql.git;f=src/bin/pg_basebackup/pg_basebackup.c;hb=REL_13_0#l2621 `safelink "${pgwal_directory}" "${postgres_data_directory}/pg_wal"`, - `results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal")"`, + `results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal" ||:)"`, // Early versions of PGO create replicas with a recovery signal file. // Patroni also creates a standby signal file before starting Postgres, diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index 2de7ebcabc..cd4c92d185 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -1,22 +1,12 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres import ( "bytes" + "context" "errors" "fmt" "os" @@ -31,7 +21,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -466,13 +455,14 @@ func TestBashSafeLink(t *testing.T) { func TestStartupCommand(t *testing.T) { shellcheck := require.ShellCheck(t) + t.Parallel() - assert.NilError(t, util.AddAndSetFeatureGates(string(util.TablespaceVolumes+"=false"))) cluster := new(v1beta1.PostgresCluster) cluster.Spec.PostgresVersion = 13 instance := new(v1beta1.PostgresInstanceSetSpec) - command := startupCommand(cluster, instance) + ctx := context.Background() + command := startupCommand(ctx, cluster, instance) // Expect a bash command with an inline script. assert.DeepEqual(t, command[:3], []string{"bash", "-ceu", "--"}) @@ -507,7 +497,7 @@ func TestStartupCommand(t *testing.T) { }, }, } - command := startupCommand(cluster, instance) + command := startupCommand(ctx, cluster, instance) assert.Assert(t, len(command) > 3) assert.Assert(t, strings.Contains(command[3], `cat << "EOF" > /tmp/pg_rewind_tde.sh #!/bin/sh diff --git a/internal/postgres/databases.go b/internal/postgres/databases.go index 8c46b3e19f..0d70170527 100644 --- a/internal/postgres/databases.go +++ b/internal/postgres/databases.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/databases_test.go b/internal/postgres/databases_test.go index f6f276ab0b..e025e86788 100644 --- a/internal/postgres/databases_test.go +++ b/internal/postgres/databases_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/doc.go b/internal/postgres/doc.go index e84fce010a..bd616b5916 100644 --- a/internal/postgres/doc.go +++ b/internal/postgres/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
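Editorial note on the feature-gate migration above: startupCommand now receives a context.Context and consults feature.Enabled(ctx, ...) instead of the old package-level util.DefaultMutableFeatureGate, and each test builds its own gate and attaches it to the context, which is why t.Parallel() becomes safe. The following is a minimal standalone illustration of that pattern, not the operator's actual internal/feature package; all names here are placeholders.

package main

import (
	"context"
	"fmt"
)

// gateKey is the private context key; Gate is a toy stand-in for a feature gate.
type gateKey struct{}

type Gate map[string]bool

// NewContext returns a copy of ctx carrying the given gate.
func NewContext(ctx context.Context, g Gate) context.Context {
	return context.WithValue(ctx, gateKey{}, g)
}

// Enabled reports whether the named feature is on in the gate carried by ctx.
func Enabled(ctx context.Context, name string) bool {
	g, _ := ctx.Value(gateKey{}).(Gate)
	return g[name]
}

func main() {
	ctx := NewContext(context.Background(), Gate{"TablespaceVolumes": true})

	// Callers such as startupCommand(ctx, ...) consult the gate through the
	// context instead of global state, so parallel tests stay independent.
	fmt.Println(Enabled(ctx, "TablespaceVolumes"))                  // true
	fmt.Println(Enabled(context.Background(), "TablespaceVolumes")) // false
}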
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package postgres is a collection of resources that interact with PostgreSQL // or provide functionality that makes it easier for other resources to interact diff --git a/internal/postgres/exec.go b/internal/postgres/exec.go index 326588bdff..a846a8aa57 100644 --- a/internal/postgres/exec.go +++ b/internal/postgres/exec.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/exec_test.go b/internal/postgres/exec_test.go index c2f56e7fd0..df9b862577 100644 --- a/internal/postgres/exec_test.go +++ b/internal/postgres/exec_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/hba.go b/internal/postgres/hba.go index fd358ea96b..d9b5ce2680 100644 --- a/internal/postgres/hba.go +++ b/internal/postgres/hba.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/hba_test.go b/internal/postgres/hba_test.go index 5f7a5c0075..9744479fdd 100644 --- a/internal/postgres/hba_test.go +++ b/internal/postgres/hba_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/huge_pages.go b/internal/postgres/huge_pages.go index 0e97e094d9..ee13c0d11b 100644 --- a/internal/postgres/huge_pages.go +++ b/internal/postgres/huge_pages.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/huge_pages_test.go b/internal/postgres/huge_pages_test.go index c21f96750e..58a6a6aa57 100644 --- a/internal/postgres/huge_pages_test.go +++ b/internal/postgres/huge_pages_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/iana.go b/internal/postgres/iana.go index e43cec1fd8..4392b549f1 100644 --- a/internal/postgres/iana.go +++ b/internal/postgres/iana.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/parameters.go b/internal/postgres/parameters.go index 35cc30aa9c..434d9fd1dd 100644 --- a/internal/postgres/parameters.go +++ b/internal/postgres/parameters.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/parameters_test.go b/internal/postgres/parameters_test.go index f87738ed77..c6228d7958 100644 --- a/internal/postgres/parameters_test.go +++ b/internal/postgres/parameters_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package postgres diff --git a/internal/postgres/password/doc.go b/internal/postgres/password/doc.go index 3abf99d988..eef7ed7db2 100644 --- a/internal/postgres/password/doc.go +++ b/internal/postgres/password/doc.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // package password lets one create the appropriate password hashes and // verifiers that are used for adding the information into PostgreSQL diff --git a/internal/postgres/password/md5.go b/internal/postgres/password/md5.go index 648d4edc24..884dfb655e 100644 --- a/internal/postgres/password/md5.go +++ b/internal/postgres/password/md5.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package password diff --git a/internal/postgres/password/md5_test.go b/internal/postgres/password/md5_test.go index 11ee6465a2..80cb7742d6 100644 --- a/internal/postgres/password/md5_test.go +++ b/internal/postgres/password/md5_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package password diff --git a/internal/postgres/password/password.go b/internal/postgres/password/password.go index 07ec826a9a..337282cc74 100644 --- a/internal/postgres/password/password.go +++ b/internal/postgres/password/password.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package password diff --git a/internal/postgres/password/password_test.go b/internal/postgres/password/password_test.go index 9688616b01..3401dec4ac 100644 --- a/internal/postgres/password/password_test.go +++ b/internal/postgres/password/password_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package password diff --git a/internal/postgres/password/scram.go b/internal/postgres/password/scram.go index 66f5cd8151..8264cd87a0 100644 --- a/internal/postgres/password/scram.go +++ b/internal/postgres/password/scram.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package password diff --git a/internal/postgres/password/scram_test.go b/internal/postgres/password/scram_test.go index 6f2ca2505f..0552e519b7 100644 --- a/internal/postgres/password/scram_test.go +++ b/internal/postgres/password/scram_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package password diff --git a/internal/postgres/reconcile.go b/internal/postgres/reconcile.go index e0334f1ff8..344f91dd9f 100644 --- a/internal/postgres/reconcile.go +++ b/internal/postgres/reconcile.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -22,9 +11,9 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -195,7 +184,7 @@ func InstancePod(ctx context.Context, ImagePullPolicy: container.ImagePullPolicy, SecurityContext: initialize.RestrictedSecurityContext(), - VolumeMounts: []corev1.VolumeMount{certVolumeMount}, + VolumeMounts: []corev1.VolumeMount{certVolumeMount, dataVolumeMount}, } if inInstanceSpec.Sidecars != nil && @@ -207,7 +196,7 @@ func InstancePod(ctx context.Context, startup := corev1.Container{ Name: naming.ContainerPostgresStartup, - Command: startupCommand(inCluster, inInstanceSpec), + Command: startupCommand(ctx, inCluster, inInstanceSpec), Env: Environment(inCluster), Image: container.Image, @@ -276,7 +265,7 @@ func InstancePod(ctx context.Context, // If the InstanceSidecars feature gate is enabled and instance sidecars are // defined, add the defined container to the Pod. - if util.DefaultMutableFeatureGate.Enabled(util.InstanceSidecars) && + if feature.Enabled(ctx, feature.InstanceSidecars) && inInstanceSpec.Containers != nil { outInstancePod.Containers = append(outInstancePod.Containers, inInstanceSpec.Containers...) } @@ -294,8 +283,7 @@ func PodSecurityContext(cluster *v1beta1.PostgresCluster) *corev1.PodSecurityCon // - https://docs.k8s.io/concepts/security/pod-security-standards/ for i := range cluster.Spec.SupplementalGroups { if gid := cluster.Spec.SupplementalGroups[i]; gid > 0 { - podSecurityContext.SupplementalGroups = - append(podSecurityContext.SupplementalGroups, gid) + podSecurityContext.SupplementalGroups = append(podSecurityContext.SupplementalGroups, gid) } } diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index 40886fb97d..138b5c7b3e 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -23,9 +12,10 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/util" + "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -70,11 +60,9 @@ func TestTablespaceVolumeMount(t *testing.T) { } func TestInstancePod(t *testing.T) { - ctx := context.Background() - - // Initialize the feature gate - assert.NilError(t, util.AddAndSetFeatureGates("")) + t.Parallel() + ctx := context.Background() cluster := new(v1beta1.PostgresCluster) cluster.Default() cluster.Spec.ImagePullPolicy = corev1.PullAlways @@ -130,7 +118,7 @@ func TestInstancePod(t *testing.T) { InstancePod(ctx, cluster, instance, serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) - assert.Assert(t, marshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(pod, ` containers: - env: - name: PGDATA @@ -143,6 +131,8 @@ containers: value: /etc/postgres/krb5.conf - name: KRB5RCACHEDIR value: /tmp + - name: LDAPTLS_CACERT + value: /etc/postgres/ldap/ca.crt imagePullPolicy: Always name: database ports: @@ -177,16 +167,39 @@ containers: - -- - |- monitor() { + # Parameters for curl when managing autogrow annotation. + APISERVER="https://kubernetes.default.svc" + SERVICEACCOUNT="/var/run/secrets/kubernetes.io/serviceaccount" + NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + TOKEN=$(cat ${SERVICEACCOUNT}/token) + CACERT=${SERVICEACCOUNT}/ca.crt + declare -r directory="/pgconf/tls" - exec {fd}<> <(:) - while read -r -t 5 -u "${fd}" || true; do - if [ "${directory}" -nt "/proc/self/fd/${fd}" ] && + exec {fd}<> <(:||:) + while read -r -t 5 -u "${fd}" ||:; do + # Manage replication certificate. + if [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] && install -D --mode=0600 -t "/tmp/replication" "${directory}"/{replication/tls.crt,replication/tls.key,replication/ca.crt} && pkill -HUP --exact --parent=1 postgres then - exec {fd}>&- && exec {fd}<> <(:) + exec {fd}>&- && exec {fd}<> <(:||:) stat --format='Loaded certificates dated %y' "${directory}" fi + + # Manage autogrow annotation. + # Return size in Mebibytes. + size=$(df --human-readable --block-size=M /pgdata | awk 'FNR == 2 {print $2}') + use=$(df --human-readable /pgdata | awk 'FNR == 2 {print $5}') + sizeInt="${size//M/}" + # Use the sed punctuation class, because the shell will not accept the percent sign in an expansion. 
+ useInt=$(echo $use | sed 's/[[:punct:]]//g') + triggerExpansion="$((useInt > 75))" + if [ $triggerExpansion -eq 1 ]; then + newSize="$(((sizeInt / 2)+sizeInt))" + newSizeMi="${newSize}Mi" + d='[{"op": "add", "path": "/metadata/annotations/suggested-pgdata-pvc-size", "value": "'"$newSizeMi"'"}]' + curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -XPATCH "${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods/${HOSTNAME}?fieldManager=kubectl-annotate" -H "Content-Type: application/json-patch+json" --data "$d" + fi done }; export -f monitor; exec -a "$0" bash -ceu monitor - replication-cert-copy @@ -209,6 +222,8 @@ containers: - mountPath: /pgconf/tls name: cert-volume readOnly: true + - mountPath: /pgdata + name: postgres-data initContainers: - command: - bash @@ -226,24 +241,26 @@ initContainers: safelink() ( local desired="$1" name="$2" current current=$(realpath "${name}") - if [ "${current}" = "${desired}" ]; then return; fi + if [[ "${current}" == "${desired}" ]]; then return; fi set -x; mv --no-target-directory "${current}" "${desired}" ln --no-dereference --force --symbolic "${desired}" "${name}" ) echo Initializing ... - results 'uid' "$(id -u)" 'gid' "$(id -G)" - results 'postgres path' "$(command -v postgres)" - results 'postgres version' "${postgres_version:=$(postgres --version)}" + results 'uid' "$(id -u ||:)" 'gid' "$(id -G ||:)" + if [[ "${pgwal_directory}" == *"pgwal/"* ]] && [[ ! -d "/pgwal/pgbackrest-spool" ]];then rm -rf "/pgdata/pgbackrest-spool" && mkdir -p "/pgwal/pgbackrest-spool" && ln --force --symbolic "/pgwal/pgbackrest-spool" "/pgdata/pgbackrest-spool";fi + if [[ ! -e "/pgdata/pgbackrest-spool" ]];then rm -rf /pgdata/pgbackrest-spool;fi + results 'postgres path' "$(command -v postgres ||:)" + results 'postgres version' "${postgres_version:=$(postgres --version ||:)}" [[ "${postgres_version}" =~ ") ${expected_major_version}"($|[^0-9]) ]] || halt Expected PostgreSQL version "${expected_major_version}" results 'config directory' "${PGDATA:?}" - postgres_data_directory=$([ -d "${PGDATA}" ] && postgres -C data_directory || echo "${PGDATA}") + postgres_data_directory=$([[ -d "${PGDATA}" ]] && postgres -C data_directory || echo "${PGDATA}") results 'data directory' "${postgres_data_directory}" [[ "${postgres_data_directory}" == "${PGDATA}" ]] || halt Expected matching config and data directories bootstrap_dir="${postgres_data_directory}_bootstrap" - [ -d "${bootstrap_dir}" ] && results 'bootstrap directory' "${bootstrap_dir}" - [ -d "${bootstrap_dir}" ] && postgres_data_directory="${bootstrap_dir}" + [[ -d "${bootstrap_dir}" ]] && results 'bootstrap directory' "${bootstrap_dir}" + [[ -d "${bootstrap_dir}" ]] && postgres_data_directory="${bootstrap_dir}" if [[ ! -e "${postgres_data_directory}" || -O "${postgres_data_directory}" ]]; then install --directory --mode=0700 "${postgres_data_directory}" elif [[ -w "${postgres_data_directory}" && -g "${postgres_data_directory}" ]]; then @@ -256,14 +273,14 @@ initContainers: install -D --mode=0600 -t "/tmp/replication" "/pgconf/tls/replication"/{tls.crt,tls.key,ca.crt} - [ -f "${postgres_data_directory}/PG_VERSION" ] || exit 0 + [[ -f "${postgres_data_directory}/PG_VERSION" ]] || exit 0 results 'data version' "${postgres_data_version:=$(< "${postgres_data_directory}/PG_VERSION")}" [[ "${postgres_data_version}" == "${expected_major_version}" ]] || halt Expected PostgreSQL data version "${expected_major_version}" [[ ! 
-f "${postgres_data_directory}/postgresql.conf" ]] && touch "${postgres_data_directory}/postgresql.conf" safelink "${pgwal_directory}" "${postgres_data_directory}/pg_wal" - results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal")" + results 'wal directory' "$(realpath "${postgres_data_directory}/pg_wal" ||:)" rm -f "${postgres_data_directory}/recovery.signal" - startup - "11" @@ -280,6 +297,8 @@ initContainers: value: /etc/postgres/krb5.conf - name: KRB5RCACHEDIR value: /tmp + - name: LDAPTLS_CACERT + value: /etc/postgres/ldap/ca.crt imagePullPolicy: Always name: postgres-startup resources: @@ -370,7 +389,7 @@ volumes: assert.Assert(t, len(pod.InitContainers) > 0) // Container has all mountPaths, including downwardAPI - assert.Assert(t, marshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -383,7 +402,7 @@ volumes: name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) // InitContainer has all mountPaths, except downwardAPI - assert.Assert(t, marshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -392,7 +411,7 @@ volumes: - mountPath: /pgwal name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) - assert.Assert(t, marshalMatches(pod.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(pod.Volumes, ` - name: cert-volume projected: defaultMode: 384 @@ -478,7 +497,7 @@ volumes: // Container has all mountPaths, including downwardAPI, // and the postgres-config - assert.Assert(t, marshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -492,7 +511,7 @@ volumes: readOnly: true`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) // InitContainer has all mountPaths, except downwardAPI and additionalConfig - assert.Assert(t, marshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -514,7 +533,12 @@ volumes: }) t.Run("SidecarEnabled", func(t *testing.T) { - assert.NilError(t, util.AddAndSetFeatureGates(string(util.InstanceSidecars+"=true"))) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.InstanceSidecars: true, + })) + ctx := feature.NewContext(ctx, gate) + InstancePod(ctx, cluster, sidecarInstance, serverSecretProjection, clientSecretProjection, dataVolume, nil, nil, pod) @@ -532,7 +556,6 @@ volumes: }) t.Run("WithTablespaces", func(t *testing.T) { - clusterWithTablespaces := cluster.DeepCopy() clusterWithTablespaces.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{ { @@ -556,7 +579,7 @@ volumes: InstancePod(ctx, cluster, instance, serverSecretProjection, clientSecretProjection, dataVolume, nil, tablespaceVolumes, pod) - assert.Assert(t, marshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -571,7 +594,7 @@ volumes: name: tablespace-trial`), "expected tablespace mount(s) in %q container", pod.Containers[0].Name) // InitContainer has all mountPaths, except downwardAPI and 
additionalConfig - assert.Assert(t, marshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -597,7 +620,7 @@ volumes: assert.Assert(t, len(pod.Containers) > 0) assert.Assert(t, len(pod.InitContainers) > 0) - assert.Assert(t, marshalMatches(pod.Containers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.Containers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -609,7 +632,7 @@ volumes: - mountPath: /pgwal name: postgres-wal`), "expected WAL and downwardAPI mounts in %q container", pod.Containers[0].Name) - assert.Assert(t, marshalMatches(pod.InitContainers[0].VolumeMounts, ` + assert.Assert(t, cmp.MarshalMatches(pod.InitContainers[0].VolumeMounts, ` - mountPath: /pgconf/tls name: cert-volume readOnly: true @@ -618,7 +641,7 @@ volumes: - mountPath: /pgwal name: postgres-wal`), "expected WAL mount, no downwardAPI mount in %q container", pod.InitContainers[0].Name) - assert.Assert(t, marshalMatches(pod.Volumes, ` + assert.Assert(t, cmp.MarshalMatches(pod.Volumes, ` - name: cert-volume projected: defaultMode: 384 @@ -688,23 +711,23 @@ func TestPodSecurityContext(t *testing.T) { cluster := new(v1beta1.PostgresCluster) cluster.Default() - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroup: 26 fsGroupChangePolicy: OnRootMismatch `)) cluster.Spec.OpenShift = initialize.Bool(true) - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroupChangePolicy: OnRootMismatch `)) cluster.Spec.SupplementalGroups = []int64{} - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroupChangePolicy: OnRootMismatch `)) cluster.Spec.SupplementalGroups = []int64{999, 65000} - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroupChangePolicy: OnRootMismatch supplementalGroups: - 999 @@ -712,7 +735,7 @@ supplementalGroups: `)) *cluster.Spec.OpenShift = false - assert.Assert(t, marshalMatches(PodSecurityContext(cluster), ` + assert.Assert(t, cmp.MarshalMatches(PodSecurityContext(cluster), ` fsGroup: 26 fsGroupChangePolicy: OnRootMismatch supplementalGroups: diff --git a/internal/postgres/users.go b/internal/postgres/users.go index bfe9597ef1..be8785a4e5 100644 --- a/internal/postgres/users.go +++ b/internal/postgres/users.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -23,10 +12,18 @@ import ( pg_query "github.com/pganalyze/pg_query_go/v5" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +var RESERVED_SCHEMA_NAMES = map[string]bool{ + "public": true, // This is here for documentation; Postgres will reject a role named `public` as reserved + "pgbouncer": true, + "monitor": true, +} + func sanitizeAlterRoleOptions(options string) string { const AlterRolePrefix = `ALTER ROLE "any" WITH ` @@ -61,7 +58,7 @@ func sanitizeAlterRoleOptions(options string) string { // grants them access to their specified databases. The databases must already // exist. func WriteUsersInPostgreSQL( - ctx context.Context, exec Executor, + ctx context.Context, cluster *v1beta1.PostgresCluster, exec Executor, users []v1beta1.PostgresUserSpec, verifiers map[string]string, ) error { log := logging.FromContext(ctx) @@ -162,5 +159,83 @@ SELECT pg_catalog.format('GRANT ALL PRIVILEGES ON DATABASE %I TO %I', log.V(1).Info("wrote PostgreSQL users", "stdout", stdout, "stderr", stderr) + // The operator will attempt to write schemas for the users in the spec if + // * the feature gate is enabled and + // * the cluster is annotated. + if feature.Enabled(ctx, feature.AutoCreateUserSchema) { + autoCreateUserSchemaAnnotationValue, annotationExists := cluster.Annotations[naming.AutoCreateUserSchemaAnnotation] + if annotationExists && strings.EqualFold(autoCreateUserSchemaAnnotationValue, "true") { + log.V(1).Info("Writing schemas for users.") + err = WriteUsersSchemasInPostgreSQL(ctx, exec, users) + } + } + + return err +} + +// WriteUsersSchemasInPostgreSQL will create a schema for each user in each database that user has access to +func WriteUsersSchemasInPostgreSQL(ctx context.Context, exec Executor, + users []v1beta1.PostgresUserSpec) error { + + log := logging.FromContext(ctx) + + var err error + var stdout string + var stderr string + + for i := range users { + spec := users[i] + + // We skip if the user has the name of a reserved schema + if RESERVED_SCHEMA_NAMES[string(spec.Name)] { + log.V(1).Info("Skipping schema creation for user with reserved name", + "name", string(spec.Name)) + continue + } + + // We skip if the user has no databases + if len(spec.Databases) == 0 { + continue + } + + var sql bytes.Buffer + + // Prevent unexpected dereferences by emptying "search_path". The "pg_catalog" + // schema is still searched, and only temporary objects can be created. + // - https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-SEARCH-PATH + _, _ = sql.WriteString(`SET search_path TO '';`) + + _, _ = sql.WriteString(`SELECT * FROM json_array_elements_text(:'databases');`) + + databases, _ := json.Marshal(spec.Databases) + + stdout, stderr, err = exec.ExecInDatabasesFromQuery(ctx, + sql.String(), + strings.Join([]string{ + // Quiet NOTICE messages from IF EXISTS statements. 
+ // - https://www.postgresql.org/docs/current/runtime-config-client.html + `SET client_min_messages = WARNING;`, + + // Creates a schema named after and owned by the user + // - https://www.postgresql.org/docs/current/ddl-schemas.html + // - https://www.postgresql.org/docs/current/sql-createschema.html + + // We create a schema named after the user because + // the PG search_path does not need to be updated, + // since search_path defaults to "$user", public. + // - https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATH + `CREATE SCHEMA IF NOT EXISTS :"username" AUTHORIZATION :"username";`, + }, "\n"), + map[string]string{ + "databases": string(databases), + "username": string(spec.Name), + + "ON_ERROR_STOP": "on", // Abort when any one statement fails. + "QUIET": "on", // Do not print successful commands to stdout. + }, + ) + + log.V(1).Info("wrote PostgreSQL schemas", "stdout", stdout, "stderr", stderr) + } return err } diff --git a/internal/postgres/users_test.go b/internal/postgres/users_test.go index 2025f92d45..141175c78e 100644 --- a/internal/postgres/users_test.go +++ b/internal/postgres/users_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
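The schema-writing path above only runs when both the AutoCreateUserSchema feature gate and the cluster's autoCreateUserSchema annotation are set. Below is a minimal test-style sketch of exercising that guard, built only from helpers that appear elsewhere in this diff (feature.NewGate, SetFromMap, feature.NewContext, naming.AutoCreateUserSchemaAnnotation, and the Executor stub pattern used in users_test.go); the test name and the exact call count are assumptions, not part of the patch.

```go
package postgres

import (
	"context"
	"io"
	"testing"

	"gotest.tools/v3/assert"

	"github.com/crunchydata/postgres-operator/internal/feature"
	"github.com/crunchydata/postgres-operator/internal/naming"
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

func TestSketchAutoCreateUserSchema(t *testing.T) {
	// Enable the gate on the context, mirroring the pattern used by the
	// upgradecheck tests later in this diff.
	gate := feature.NewGate()
	assert.NilError(t, gate.SetFromMap(map[string]bool{
		feature.AutoCreateUserSchema: true,
	}))
	ctx := feature.NewContext(context.Background(), gate)

	// Annotate the cluster; the gate alone is not enough.
	cluster := new(v1beta1.PostgresCluster)
	cluster.Annotations = map[string]string{
		naming.AutoCreateUserSchemaAnnotation: "true",
	}

	calls := 0
	exec := func(
		_ context.Context, stdin io.Reader, _, _ io.Writer, _ ...string,
	) error {
		calls++
		_, err := io.ReadAll(stdin)
		return err
	}

	assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec,
		[]v1beta1.PostgresUserSpec{
			{Name: "app", Databases: []v1beta1.PostgresIdentifier{"app"}},
		}, nil))

	// Expectation under these assumptions: one Executor call for the user
	// statements plus one for the single user's schema statement.
	assert.Equal(t, calls, 2)
}
```

Without the annotation, or with the gate disabled, the same call should leave the stub at a single invocation, since only the user statements are written.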
+// +// SPDX-License-Identifier: Apache-2.0 package postgres @@ -19,6 +8,7 @@ import ( "context" "errors" "io" + "regexp" "strings" "testing" @@ -59,7 +49,8 @@ func TestWriteUsersInPostgreSQL(t *testing.T) { return expected } - assert.Equal(t, expected, WriteUsersInPostgreSQL(ctx, exec, nil, nil)) + cluster := new(v1beta1.PostgresCluster) + assert.Equal(t, expected, WriteUsersInPostgreSQL(ctx, cluster, exec, nil, nil)) }) t.Run("Empty", func(t *testing.T) { @@ -104,17 +95,19 @@ COMMIT;`)) return nil } - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, nil, nil)) + cluster := new(v1beta1.PostgresCluster) + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, nil, nil)) assert.Equal(t, calls, 1) - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, []v1beta1.PostgresUserSpec{}, nil)) + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, []v1beta1.PostgresUserSpec{}, nil)) assert.Equal(t, calls, 2) - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, nil, map[string]string{})) + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, nil, map[string]string{})) assert.Equal(t, calls, 3) }) t.Run("OptionalFields", func(t *testing.T) { + cluster := new(v1beta1.PostgresCluster) calls := 0 exec := func( _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, @@ -134,7 +127,7 @@ COMMIT;`)) return nil } - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, []v1beta1.PostgresUserSpec{ { Name: "user-no-options", @@ -162,6 +155,7 @@ COMMIT;`)) t.Run("PostgresSuperuser", func(t *testing.T) { calls := 0 + cluster := new(v1beta1.PostgresCluster) exec := func( _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, ) error { @@ -177,7 +171,7 @@ COMMIT;`)) return nil } - assert.NilError(t, WriteUsersInPostgreSQL(ctx, exec, + assert.NilError(t, WriteUsersInPostgreSQL(ctx, cluster, exec, []v1beta1.PostgresUserSpec{ { Name: "postgres", @@ -192,3 +186,52 @@ COMMIT;`)) assert.Equal(t, calls, 1) }) } + +func TestWriteUsersSchemasInPostgreSQL(t *testing.T) { + ctx := context.Background() + + t.Run("Mixed users", func(t *testing.T) { + calls := 0 + exec := func( + _ context.Context, stdin io.Reader, _, _ io.Writer, command ...string, + ) error { + calls++ + + b, err := io.ReadAll(stdin) + assert.NilError(t, err) + + // The command strings will contain either of two possibilities, depending on the user called. 
+ commands := strings.Join(command, ",") + re := regexp.MustCompile("--set=databases=\\[\"db1\"\\],--set=username=user-single-db|--set=databases=\\[\"db1\",\"db2\"\\],--set=username=user-multi-db") + assert.Assert(t, cmp.Regexp(re, commands)) + + assert.Assert(t, cmp.Contains(string(b), `CREATE SCHEMA IF NOT EXISTS :"username" AUTHORIZATION :"username";`)) + return nil + } + + assert.NilError(t, WriteUsersSchemasInPostgreSQL(ctx, exec, + []v1beta1.PostgresUserSpec{ + { + Name: "user-single-db", + Databases: []v1beta1.PostgresIdentifier{"db1"}, + }, + { + Name: "user-no-databases", + }, + { + Name: "user-multi-dbs", + Databases: []v1beta1.PostgresIdentifier{"db1", "db2"}, + }, + { + Name: "public", + Databases: []v1beta1.PostgresIdentifier{"db3"}, + }, + }, + )) + // The spec.users has four elements, but two will be skipped: + // * the user with the reserved name `public` + // * the user with 0 databases + assert.Equal(t, calls, 2) + }) + +} diff --git a/internal/postgres/wal.md b/internal/postgres/wal.md index dc1e0c54a2..afb094c20e 100644 --- a/internal/postgres/wal.md +++ b/internal/postgres/wal.md @@ -1,16 +1,7 @@ PostgreSQL commits transactions by storing changes in its [write-ahead log][WAL]. diff --git a/internal/registration/interface.go b/internal/registration/interface.go index a7fa28ff5f..578a064e2b 100644 --- a/internal/registration/interface.go +++ b/internal/registration/interface.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package registration diff --git a/internal/registration/runner.go b/internal/registration/runner.go index e34412c07d..0d607e1e94 100644 --- a/internal/registration/runner.go +++ b/internal/registration/runner.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package registration @@ -86,8 +76,14 @@ func NewRunner(publicKey, tokenPath string, changed func()) (*Runner, error) { } // CheckToken loads and verifies the configured token, returning an error when -// the file exists but cannot be verified. -func (r *Runner) CheckToken() error { +// the file exists but cannot be verified, and +// returning the token if it can be verified. 
+// NOTE(upgradecheck): return the token/nil so that we can use the token +// in upgradecheck; currently a refresh of the token will cause a restart of the pod +// meaning that the token used in upgradecheck is always the current token. +// But if the restart behavior changes, we might drop the token return in main.go +// and change upgradecheck to retrieve the token itself +func (r *Runner) CheckToken() (*jwt.Token, error) { data, errFile := os.ReadFile(r.tokenPath) key := func(*jwt.Token) (any, error) { return r.publicKey, nil } @@ -96,7 +92,7 @@ func (r *Runner) CheckToken() error { r.token.Lock() defer r.token.Unlock() - _, errToken := jwt.ParseWithClaims(string(data), &r.token, key, + token, errToken := jwt.ParseWithClaims(string(data), &r.token, key, jwt.WithExpirationRequired(), jwt.WithValidMethods([]string{"RS256"}), ) @@ -112,11 +108,11 @@ func (r *Runner) CheckToken() error { switch { case !r.enabled || !r.token.Exists: - return nil + return nil, nil case errFile != nil: - return errFile + return nil, errFile default: - return errToken + return token, errToken } } @@ -178,7 +174,7 @@ func (r *Runner) Start(ctx context.Context) error { select { case <-ticks: _, before := r.state() - if err := r.CheckToken(); err != nil { + if _, err := r.CheckToken(); err != nil { log.Error(err, "Unable to validate token") } if _, after := r.state(); before != after && r.changed != nil { diff --git a/internal/registration/runner_test.go b/internal/registration/runner_test.go index 28ef26c502..8e75848986 100644 --- a/internal/registration/runner_test.go +++ b/internal/registration/runner_test.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
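With CheckToken now returning the parsed *jwt.Token alongside the error, callers can read verified claims directly instead of reaching into Runner state. The following is a small illustrative helper, not part of the patch, using only calls shown in this diff (CheckToken, Claims.GetExpirationTime, and the NumericDate's Time field).

```go
package registration

import "time"

// tokenExpiry reports when the verified registration token expires.
// It returns the zero time when registration is disabled or no token file
// exists, because CheckToken returns nil, nil in those cases.
func tokenExpiry(r *Runner) (time.Time, error) {
	token, err := r.CheckToken()
	if err != nil || token == nil {
		return time.Time{}, err
	}

	// The "exp" claim is present on any token that verified, since
	// CheckToken parses with jwt.WithExpirationRequired.
	exp, err := token.Claims.GetExpirationTime()
	if err != nil || exp == nil {
		return time.Time{}, err
	}
	return exp.Time, nil
}
```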
+// SPDX-License-Identifier: Apache-2.0 package registration @@ -111,19 +101,22 @@ func TestRunnerCheckToken(t *testing.T) { t.Run("SafeToCallDisabled", func(t *testing.T) { r := Runner{enabled: false} - assert.NilError(t, r.CheckToken()) + _, err := r.CheckToken() + assert.NilError(t, err) }) t.Run("FileMissing", func(t *testing.T) { r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} - assert.NilError(t, r.CheckToken()) + _, err := r.CheckToken() + assert.NilError(t, err) }) t.Run("FileUnreadable", func(t *testing.T) { r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o200)) // Writeable - assert.ErrorContains(t, r.CheckToken(), "permission") + _, err := r.CheckToken() + assert.ErrorContains(t, err, "permission") assert.Assert(t, r.token.ExpiresAt == nil) }) @@ -131,7 +124,8 @@ func TestRunnerCheckToken(t *testing.T) { r := Runner{enabled: true, tokenPath: filepath.Join(dir, "empty")} assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o400)) // Readable - assert.ErrorContains(t, r.CheckToken(), "malformed") + _, err := r.CheckToken() + assert.ErrorContains(t, err, "malformed") assert.Assert(t, r.token.ExpiresAt == nil) }) @@ -150,7 +144,8 @@ func TestRunnerCheckToken(t *testing.T) { assert.NilError(t, err) assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - assert.Assert(t, r.CheckToken() != nil, "HMAC algorithm should be rejected") + _, err = r.CheckToken() + assert.Assert(t, err != nil, "HMAC algorithm should be rejected") assert.Assert(t, r.token.ExpiresAt == nil) }) @@ -165,7 +160,7 @@ func TestRunnerCheckToken(t *testing.T) { assert.NilError(t, err) assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - err = r.CheckToken() + _, err = r.CheckToken() assert.ErrorContains(t, err, "exp claim is required") assert.Assert(t, r.token.ExpiresAt == nil) }) @@ -183,7 +178,7 @@ func TestRunnerCheckToken(t *testing.T) { assert.NilError(t, err) assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - err = r.CheckToken() + _, err = r.CheckToken() assert.ErrorContains(t, err, "is expired") assert.Assert(t, r.token.ExpiresAt == nil) }) @@ -195,14 +190,20 @@ func TestRunnerCheckToken(t *testing.T) { tokenPath: filepath.Join(dir, "valid"), } + expiration := jwt.NewNumericDate(time.Now().Add(time.Hour)) data, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ - "exp": jwt.NewNumericDate(time.Now().Add(time.Hour)), + "exp": expiration, }).SignedString(key) assert.NilError(t, err) assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - assert.NilError(t, r.CheckToken()) + token, err := r.CheckToken() + assert.NilError(t, err) assert.Assert(t, r.token.ExpiresAt != nil) + assert.Assert(t, token.Valid) + exp, err := token.Claims.GetExpirationTime() + assert.NilError(t, err) + assert.Equal(t, exp.Time, expiration.Time) }) } @@ -557,7 +558,8 @@ func TestRunnerStart(t *testing.T) { // Begin with an invalid token. assert.NilError(t, os.WriteFile(runner.tokenPath, nil, 0o600)) - assert.Assert(t, runner.CheckToken() != nil) + _, err = runner.CheckToken() + assert.Assert(t, err != nil) // Replace it with a valid token. 
assert.NilError(t, os.WriteFile(runner.tokenPath, []byte(token), 0o600)) diff --git a/internal/registration/testing.go b/internal/registration/testing.go index fb9e9e4f4b..1418f6d2d3 100644 --- a/internal/registration/testing.go +++ b/internal/registration/testing.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package registration diff --git a/internal/testing/cmp/cmp.go b/internal/testing/cmp/cmp.go index 58e0a1e4de..265a598064 100644 --- a/internal/testing/cmp/cmp.go +++ b/internal/testing/cmp/cmp.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package cmp diff --git a/internal/testing/events/recorder.go b/internal/testing/events/recorder.go index 273a506521..23c03a4c40 100644 --- a/internal/testing/events/recorder.go +++ b/internal/testing/events/recorder.go @@ -1,17 +1,6 @@ -/* - Copyright 2022 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package events diff --git a/internal/testing/require/exec.go b/internal/testing/require/exec.go index 983bd49711..c182e84996 100644 --- a/internal/testing/require/exec.go +++ b/internal/testing/require/exec.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package require diff --git a/internal/testing/require/kubernetes.go b/internal/testing/require/kubernetes.go index 0829314692..df21bca058 100644 --- a/internal/testing/require/kubernetes.go +++ b/internal/testing/require/kubernetes.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package require @@ -121,6 +110,7 @@ func kubernetes3(t testing.TB) (*envtest.Environment, client.Client) { ErrorIfPathMissing: true, Paths: []string{ filepath.Join(base, "config", "crd", "bases"), + filepath.Join(base, "hack", "tools", "external-snapshotter", "client", "config", "crd"), }, Scheme: runtime.Scheme, }) diff --git a/internal/testing/require/parallel.go b/internal/testing/require/parallel.go index 72c8dbd932..4fbdf42284 100644 --- a/internal/testing/require/parallel.go +++ b/internal/testing/require/parallel.go @@ -1,17 +1,6 @@ -/* - Copyright 2022 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package require diff --git a/internal/testing/validation/postgrescluster_test.go b/internal/testing/validation/postgrescluster_test.go index f05906af3e..e71ff22b2e 100644 --- a/internal/testing/validation/postgrescluster_test.go +++ b/internal/testing/validation/postgrescluster_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package validation diff --git a/internal/upgradecheck/header.go b/internal/upgradecheck/header.go index 401d03f7a0..a1d56ef442 100644 --- a/internal/upgradecheck/header.go +++ b/internal/upgradecheck/header.go @@ -1,17 +1,6 @@ -/* - Copyright 2017 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package upgradecheck @@ -19,6 +8,7 @@ import ( "context" "encoding/json" "net/http" + "os" googleuuid "github.com/google/uuid" corev1 "k8s.io/api/core/v1" @@ -28,6 +18,8 @@ import ( "k8s.io/client-go/rest" crclient "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -44,24 +36,36 @@ var ( // Extensible struct for client upgrade data type clientUpgradeData struct { - DeploymentID string `json:"deployment_id"` - KubernetesEnv string `json:"kubernetes_env"` - PGOClustersTotal int `json:"pgo_clusters_total"` - PGOVersion string `json:"pgo_version"` - IsOpenShift bool `json:"is_open_shift"` + BridgeClustersTotal int `json:"bridge_clusters_total"` + BuildSource string `json:"build_source"` + DeploymentID string `json:"deployment_id"` + FeatureGatesEnabled string `json:"feature_gates_enabled"` + IsOpenShift bool `json:"is_open_shift"` + KubernetesEnv string `json:"kubernetes_env"` + PGOClustersTotal int `json:"pgo_clusters_total"` + PGOInstaller string `json:"pgo_installer"` + PGOInstallerOrigin string `json:"pgo_installer_origin"` + PGOVersion string `json:"pgo_version"` + RegistrationToken string `json:"registration_token"` } // generateHeader aggregates data and returns a struct of that data // If any errors are encountered, it logs those errors and uses the default values func generateHeader(ctx context.Context, cfg *rest.Config, crClient crclient.Client, - pgoVersion string, isOpenShift bool) *clientUpgradeData { + pgoVersion string, isOpenShift bool, registrationToken string) *clientUpgradeData { return &clientUpgradeData{ - PGOVersion: pgoVersion, - IsOpenShift: isOpenShift, - DeploymentID: ensureDeploymentID(ctx, crClient), - PGOClustersTotal: getManagedClusters(ctx, crClient), - KubernetesEnv: getServerVersion(ctx, cfg), + BridgeClustersTotal: getBridgeClusters(ctx, crClient), + BuildSource: os.Getenv("BUILD_SOURCE"), + DeploymentID: ensureDeploymentID(ctx, crClient), + FeatureGatesEnabled: feature.ShowGates(ctx), + IsOpenShift: isOpenShift, + KubernetesEnv: getServerVersion(ctx, cfg), + PGOClustersTotal: getManagedClusters(ctx, crClient), + PGOInstaller: os.Getenv("PGO_INSTALLER"), + PGOInstallerOrigin: 
os.Getenv("PGO_INSTALLER_ORIGIN"), + PGOVersion: pgoVersion, + RegistrationToken: registrationToken, } } @@ -125,7 +129,7 @@ func manageUpgradeCheckConfigMap(ctx context.Context, crClient crclient.Client, } } - err = applyConfigMap(ctx, crClient, cm, currentID) + err = applyConfigMap(ctx, crClient, cm, postgrescluster.ControllerName) if err != nil { log.V(1).Info("upgrade check issue: could not apply configmap", "response", err.Error()) @@ -169,6 +173,22 @@ func getManagedClusters(ctx context.Context, crClient crclient.Client) int { return count } +// getBridgeClusters returns a count of Bridge clusters managed by this PGO instance +// Any errors encountered will be logged and the count result will be 0 +func getBridgeClusters(ctx context.Context, crClient crclient.Client) int { + var count int + clusters := &v1beta1.CrunchyBridgeClusterList{} + err := crClient.List(ctx, clusters) + if err != nil { + log := logging.FromContext(ctx) + log.V(1).Info("upgrade check issue: could not count bridge clusters", + "response", err.Error()) + } else { + count = len(clusters.Items) + } + return count +} + // getServerVersion returns the stringified server version (i.e., the same info `kubectl version` // returns for the server) // Any errors encountered will be logged and will return an empty string diff --git a/internal/upgradecheck/header_test.go b/internal/upgradecheck/header_test.go index f884af3cda..c144e7629b 100644 --- a/internal/upgradecheck/header_test.go +++ b/internal/upgradecheck/header_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
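The expanded clientUpgradeData struct above defines the JSON body that addHeader attaches to the upgrade-check request; per the struct tags it serializes with snake_case keys such as bridge_clusters_total and registration_token. Here is a small in-package sketch of that payload; it relies on the package's existing encoding/json import, and every value is a placeholder rather than real data.

```go
// exampleHeaderPayload is illustrative only; the field values are placeholders.
func exampleHeaderPayload() ([]byte, error) {
	return json.Marshal(&clientUpgradeData{
		BridgeClustersTotal: 2,
		BuildSource:         "developer",
		DeploymentID:        "an-existing-uuid",
		FeatureGatesEnabled: "TablespaceVolumes=true",
		IsOpenShift:         false,
		KubernetesEnv:       "v1.30.0",
		PGOClustersTotal:    2,
		PGOInstaller:        "kustomize",
		PGOInstallerOrigin:  "examples",
		PGOVersion:          "5.6.0",
		RegistrationToken:   "a-raw-jwt",
	})
}
```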
+// +// SPDX-License-Identifier: Apache-2.0 package upgradecheck @@ -19,6 +8,7 @@ import ( "context" "encoding/json" "net/http" + "strings" "testing" "gotest.tools/v3/assert" @@ -31,6 +21,7 @@ import ( "k8s.io/client-go/rest" "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/internal/testing/require" @@ -50,6 +41,10 @@ func TestGenerateHeader(t *testing.T) { reconciler := postgrescluster.Reconciler{Client: cc} + t.Setenv("PGO_INSTALLER", "test") + t.Setenv("PGO_INSTALLER_ORIGIN", "test-origin") + t.Setenv("BUILD_SOURCE", "developer") + t.Run("error ensuring ID", func(t *testing.T) { fakeClientWithOptionalError := &fakeClientWithError{ cc, "patch error", @@ -57,7 +52,7 @@ func TestGenerateHeader(t *testing.T) { ctx, calls := setupLogCapture(ctx) res := generateHeader(ctx, cfg, fakeClientWithOptionalError, - "1.2.3", reconciler.IsOpenShift) + "1.2.3", reconciler.IsOpenShift, "") assert.Equal(t, len(*calls), 1) assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) @@ -66,8 +61,15 @@ func TestGenerateHeader(t *testing.T) { err := cc.List(ctx, &pgoList) assert.NilError(t, err) assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) + bridgeList := v1beta1.CrunchyBridgeClusterList{} + err = cc.List(ctx, &bridgeList) + assert.NilError(t, err) + assert.Equal(t, len(bridgeList.Items), res.BridgeClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) }) t.Run("error getting cluster count", func(t *testing.T) { @@ -77,14 +79,21 @@ func TestGenerateHeader(t *testing.T) { ctx, calls := setupLogCapture(ctx) res := generateHeader(ctx, cfg, fakeClientWithOptionalError, - "1.2.3", reconciler.IsOpenShift) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count postgres clusters`)) + "1.2.3", reconciler.IsOpenShift, "") + assert.Equal(t, len(*calls), 2) + // Aggregating the logs since we cannot determine which call will be first + callsAggregate := strings.Join(*calls, " ") + assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count postgres clusters`)) + assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count bridge clusters`)) assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) assert.Equal(t, deploymentID, res.DeploymentID) assert.Equal(t, 0, res.PGOClustersTotal) + assert.Equal(t, 0, res.BridgeClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) }) t.Run("error getting server version info", func(t *testing.T) { @@ -92,7 +101,7 @@ func TestGenerateHeader(t *testing.T) { badcfg := &rest.Config{} res := generateHeader(ctx, badcfg, cc, - "1.2.3", reconciler.IsOpenShift) + "1.2.3", reconciler.IsOpenShift, "") assert.Equal(t, len(*calls), 1) assert.Assert(t, 
cmp.Contains((*calls)[0], `upgrade check issue: could not retrieve server version`)) assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) @@ -103,13 +112,21 @@ func TestGenerateHeader(t *testing.T) { assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) assert.Equal(t, "", res.KubernetesEnv) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) }) t.Run("success", func(t *testing.T) { ctx, calls := setupLogCapture(ctx) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx = feature.NewContext(ctx, gate) res := generateHeader(ctx, cfg, cc, - "1.2.3", reconciler.IsOpenShift) + "1.2.3", reconciler.IsOpenShift, "") assert.Equal(t, len(*calls), 0) assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) assert.Equal(t, deploymentID, res.DeploymentID) @@ -119,6 +136,10 @@ func TestGenerateHeader(t *testing.T) { assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) assert.Equal(t, "1.2.3", res.PGOVersion) assert.Equal(t, server.String(), res.KubernetesEnv) + assert.Equal(t, "TablespaceVolumes=true", res.FeatureGatesEnabled) + assert.Equal(t, "test", res.PGOInstaller) + assert.Equal(t, "test-origin", res.PGOInstallerOrigin) + assert.Equal(t, "developer", res.BuildSource) }) } @@ -511,12 +532,35 @@ func TestGetManagedClusters(t *testing.T) { } ctx, calls := setupLogCapture(ctx) count := getManagedClusters(ctx, fakeClientWithOptionalError) - assert.Equal(t, len(*calls), 1) + assert.Assert(t, len(*calls) > 0) assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count postgres clusters`)) assert.Assert(t, count == 0) }) } +func TestGetBridgeClusters(t *testing.T) { + ctx := context.Background() + + t.Run("success", func(t *testing.T) { + fakeClient := setupFakeClientWithPGOScheme(t, true) + ctx, calls := setupLogCapture(ctx) + count := getBridgeClusters(ctx, fakeClient) + assert.Equal(t, len(*calls), 0) + assert.Assert(t, count == 2) + }) + + t.Run("list throw error", func(t *testing.T) { + fakeClientWithOptionalError := &fakeClientWithError{ + setupFakeClientWithPGOScheme(t, true), "list error", + } + ctx, calls := setupLogCapture(ctx) + count := getBridgeClusters(ctx, fakeClientWithOptionalError) + assert.Assert(t, len(*calls) > 0) + assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count bridge clusters`)) + assert.Assert(t, count == 0) + }) +} + func TestGetServerVersion(t *testing.T) { t.Run("success", func(t *testing.T) { expect, server := setupVersionServer(t, true) diff --git a/internal/upgradecheck/helpers_test.go b/internal/upgradecheck/helpers_test.go index 6d59881d66..63184184db 100644 --- a/internal/upgradecheck/helpers_test.go +++ b/internal/upgradecheck/helpers_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package upgradecheck @@ -38,20 +27,23 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +// fakeClientWithError is a controller runtime client and an error type to force type fakeClientWithError struct { crclient.Client errorType string } -func (f *fakeClientWithError) Get(ctx context.Context, key types.NamespacedName, obj crclient.Object) error { +// Get returns the client.get OR an Error (`get error`) if the fakeClientWithError is set to error that way +func (f *fakeClientWithError) Get(ctx context.Context, key types.NamespacedName, obj crclient.Object, opts ...crclient.GetOption) error { switch f.errorType { case "get error": return fmt.Errorf("get error") default: - return f.Client.Get(ctx, key, obj) + return f.Client.Get(ctx, key, obj, opts...) } } +// Patch returns the client.get OR an Error (`patch error`) if the fakeClientWithError is set to error that way // TODO: PatchType is not supported currently by fake // - https://github.com/kubernetes/client-go/issues/970 // Once that gets fixed, we can test without envtest @@ -65,6 +57,7 @@ func (f *fakeClientWithError) Patch(ctx context.Context, obj crclient.Object, } } +// List returns the client.get OR an Error (`list error`) if the fakeClientWithError is set to error that way func (f *fakeClientWithError) List(ctx context.Context, objList crclient.ObjectList, opts ...crclient.ListOption) error { switch f.errorType { @@ -75,12 +68,16 @@ func (f *fakeClientWithError) List(ctx context.Context, objList crclient.ObjectL } } +// setupDeploymentID returns a UUID func setupDeploymentID(t *testing.T) string { t.Helper() deploymentID = string(uuid.NewUUID()) return deploymentID } +// setupFakeClientWithPGOScheme returns a fake client with the PGO scheme added; +// if `includeCluster` is true, also adds some empty PostgresCluster and CrunchyBridgeCluster +// items to the client func setupFakeClientWithPGOScheme(t *testing.T, includeCluster bool) crclient.Client { t.Helper() if includeCluster { @@ -98,11 +95,31 @@ func setupFakeClientWithPGOScheme(t *testing.T, includeCluster bool) crclient.Cl }, }, } - return fake.NewClientBuilder().WithScheme(runtime.Scheme).WithLists(pc).Build() + + bcl := &v1beta1.CrunchyBridgeClusterList{ + Items: []v1beta1.CrunchyBridgeCluster{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "hippo", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "elephant", + }, + }, + }, + } + + return fake.NewClientBuilder(). + WithScheme(runtime.Scheme). + WithLists(pc, bcl). + Build() } return fake.NewClientBuilder().WithScheme(runtime.Scheme).Build() } +// setupVersionServer sets up and tears down a server and version info for testing func setupVersionServer(t *testing.T, works bool) (version.Info, *httptest.Server) { t.Helper() expect := version.Info{ @@ -127,6 +144,7 @@ func setupVersionServer(t *testing.T, works bool) (version.Info, *httptest.Serve return expect, server } +// setupLogCapture captures the logs and keeps count of the logs captured func setupLogCapture(ctx context.Context) (context.Context, *[]string) { calls := []string{} testlog := funcr.NewJSON(func(object string) { diff --git a/internal/upgradecheck/http.go b/internal/upgradecheck/http.go index 6e05499490..71a3c465c0 100644 --- a/internal/upgradecheck/http.go +++ b/internal/upgradecheck/http.go @@ -1,17 +1,6 @@ -/* - Copyright 2017 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package upgradecheck @@ -22,6 +11,7 @@ import ( "net/http" "time" + "github.com/golang-jwt/jwt/v5" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" crclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -77,7 +67,7 @@ func init() { func checkForUpgrades(ctx context.Context, url, versionString string, backoff wait.Backoff, crclient crclient.Client, cfg *rest.Config, - isOpenShift bool) (message string, header string, err error) { + isOpenShift bool, registrationToken string) (message string, header string, err error) { var headerPayloadStruct *clientUpgradeData // Prep request @@ -86,7 +76,7 @@ func checkForUpgrades(ctx context.Context, url, versionString string, backoff wa // generateHeader always returns some sort of struct, using defaults/nil values // in case some of the checks return errors headerPayloadStruct = generateHeader(ctx, cfg, crclient, - versionString, isOpenShift) + versionString, isOpenShift, registrationToken) req, err = addHeader(req, headerPayloadStruct) } @@ -136,24 +126,37 @@ type CheckForUpgradesScheduler struct { Client crclient.Client Config *rest.Config - OpenShift bool - Refresh time.Duration - URL, Version string + OpenShift bool + Refresh time.Duration + RegistrationToken string + URL, Version string } // ManagedScheduler creates a [CheckForUpgradesScheduler] and adds it to m. -func ManagedScheduler(m manager.Manager, openshift bool, url, version string) error { +// NOTE(registration): This takes a token/nil parameter when the operator is started. +// Currently the operator restarts when the token is updated, +// so this token is always current; but if that restart behavior is changed, +// we will want the upgrade mechanism to instantiate its own registration runner +// or otherwise get the most recent token. 
+func ManagedScheduler(m manager.Manager, openshift bool, + url, version string, registrationToken *jwt.Token) error { if url == "" { url = upgradeCheckURL } + var token string + if registrationToken != nil { + token = registrationToken.Raw + } + return m.Add(&CheckForUpgradesScheduler{ - Client: m.GetClient(), - Config: m.GetConfig(), - OpenShift: openshift, - Refresh: 24 * time.Hour, - URL: url, - Version: version, + Client: m.GetClient(), + Config: m.GetConfig(), + OpenShift: openshift, + Refresh: 24 * time.Hour, + RegistrationToken: token, + URL: url, + Version: version, }) } @@ -188,7 +191,7 @@ func (s *CheckForUpgradesScheduler) check(ctx context.Context) { }() info, header, err := checkForUpgrades(ctx, - s.URL, s.Version, backoff, s.Client, s.Config, s.OpenShift) + s.URL, s.Version, backoff, s.Client, s.Config, s.OpenShift, s.RegistrationToken) if err != nil { log.V(1).Info("could not complete upgrade check", "response", err.Error()) diff --git a/internal/upgradecheck/http_test.go b/internal/upgradecheck/http_test.go index b2264f4b9b..9535f942ea 100644 --- a/internal/upgradecheck/http_test.go +++ b/internal/upgradecheck/http_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package upgradecheck @@ -32,6 +21,7 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/manager" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/testing/cmp" ) @@ -58,10 +48,16 @@ func (m *MockClient) Do(req *http.Request) (*http.Response, error) { } func TestCheckForUpgrades(t *testing.T) { - fakeClient := setupFakeClientWithPGOScheme(t, false) - ctx := logging.NewContext(context.Background(), logging.Discard()) + fakeClient := setupFakeClientWithPGOScheme(t, true) cfg := &rest.Config{} + ctx := logging.NewContext(context.Background(), logging.Discard()) + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.TablespaceVolumes: true, + })) + ctx = feature.NewContext(ctx, gate) + // Pass *testing.T to allows the correct messages from the assert package // in the event of certain failures. 
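ManagedScheduler now accepts the registration token so its raw value can ride along in the upgrade-check header. A hypothetical wiring sketch for a caller such as cmd/postgres-operator/main.go follows; that file is not part of this hunk, and the function and variable names here are stand-ins.

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/crunchydata/postgres-operator/internal/registration"
	"github.com/crunchydata/postgres-operator/internal/upgradecheck"
)

// scheduleUpgradeCheck is a stand-in name; it passes the verified token,
// which may be nil, straight through to the upgrade-check scheduler.
func scheduleUpgradeCheck(
	mgr ctrl.Manager, runner *registration.Runner, openshift bool, version string,
) error {
	token, err := runner.CheckToken()
	if err != nil {
		return err
	}

	// An empty URL falls back to the default upgrade-check endpoint.
	return upgradecheck.ManagedScheduler(mgr, openshift, "", version, token)
}
```

Per the NOTE above, passing the token at startup is only sufficient while a token refresh restarts the pod; if that behavior changes, the scheduler would need to fetch the current token itself.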
checkData := func(t *testing.T, header string) { @@ -70,6 +66,10 @@ func TestCheckForUpgrades(t *testing.T) { assert.NilError(t, err) assert.Assert(t, data.DeploymentID != "") assert.Equal(t, data.PGOVersion, "4.7.3") + assert.Equal(t, data.RegistrationToken, "speakFriend") + assert.Equal(t, data.BridgeClustersTotal, 2) + assert.Equal(t, data.PGOClustersTotal, 2) + assert.Equal(t, data.FeatureGatesEnabled, "TablespaceVolumes=true") } t.Run("success", func(t *testing.T) { @@ -83,7 +83,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false) + fakeClient, cfg, false, "speakFriend") assert.NilError(t, err) assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) checkData(t, header) @@ -98,7 +98,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false) + fakeClient, cfg, false, "speakFriend") // Two failed calls because of env var assert.Equal(t, counter, 2) assert.Equal(t, res, "") @@ -118,7 +118,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false) + fakeClient, cfg, false, "speakFriend") assert.Equal(t, res, "") // Two failed calls because of env var assert.Equal(t, counter, 2) @@ -147,7 +147,7 @@ func TestCheckForUpgrades(t *testing.T) { } res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false) + fakeClient, cfg, false, "speakFriend") assert.Equal(t, counter, 2) assert.NilError(t, err) assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) diff --git a/internal/util/README.md b/internal/util/README.md deleted file mode 100644 index f71793f3ae..0000000000 --- a/internal/util/README.md +++ /dev/null @@ -1,120 +0,0 @@ - - - -## Feature Gates - -Feature gates allow users to enable or disable -certain features by setting the "PGO_FEATURE_GATES" environment -variable to a list similar to "feature1=true,feature2=false,..." -in the PGO Deployment. - -This capability leverages the relevant Kubernetes packages. Documentation and -code implementation examples are given below. - -- Documentation: - - https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ - -- Package Information: - - https://pkg.go.dev/k8s.io/component-base@v0.20.1/featuregate - -- Adding the feature gate key: - - https://releases.k8s.io/v1.20.0/pkg/features/kube_features.go#L27 - -- Adding the feature gate to the known features map: - - https://releases.k8s.io/v1.20.0/pkg/features/kube_features.go#L729-732 - -- Adding features to the featureGate - - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/component-base/featuregate/feature_gate.go#L110-L111 - -- Setting the feature gates - - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/component-base/featuregate/feature_gate.go#L105-L107 - -## Developing with Feature Gates in PGO - -To add a new feature gate, a few steps are required. First, in -`internal/util/features.go`, you will add a feature gate key name. 
As an example, -for a new feature called 'FeatureName', you would add a new constant and comment -describing what the feature gate controls at the top of the file, similar to -``` -// Enables FeatureName in PGO -FeatureName featuregate.Feature = "FeatureName" -``` - -Next, add a new entry to the `pgoFeatures` map -``` -var pgoFeatures = map[featuregate.Feature]featuregate.FeatureSpec{ - FeatureName: {Default: false, PreRelease: featuregate.Alpha}, -} -``` -where `FeatureName` is the constant defined previously, `Default: false` sets the -default behavior and `PreRelease: featuregate.Alpha`. The possible `PreRelease` -values are `Alpha`, `Beta`, `GA` and `Deprecated`. - -- https://pkg.go.dev/k8s.io/component-base@v0.20.1/featuregate#pkg-constants - -By Kubernetes convention, `Alpha` features have almost always been disabled by -default. `Beta` features are generally enabled by default. - -- https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-stages - -Prior to Kubernetes 1.24, both `Beta` features and APIs were enabled by default. -Starting in v1.24, new `Beta` APIs are generally disabled by default, while `Beta` -features remain enabled by default. - -- https://kubernetes.io/blog/2021/07/14/upcoming-changes-in-kubernetes-1-22/#kubernetes-api-removals -- https://kubernetes.io/blog/2022/05/03/kubernetes-1-24-release-announcement/#beta-apis-off-by-default -- https://github.com/kubernetes/enhancements/tree/master/keps/sig-architecture/3136-beta-apis-off-by-default#goals - -For consistency with Kubernetes, we recommend that feature-gated features be -configured as `Alpha` and disabled by default. Any `Beta` features added should -stay consistent with Kubernetes practice and be enabled by default, but we should -keep an eye out for changes to these standards and adjust as needed. - -Once the above items are set, you can then use your feature gated value in the -code base to control feature behavior using something like -``` -if util.DefaultMutableFeatureGate.Enabled(util.FeatureName) -``` - -To test the feature gate, set the `PGO_FEATURE_GATES` environment variable to -enable the new feature as follows -``` -PGO_FEATURE_GATES="FeatureName=true" -``` -Note that for more than one feature, this variable accepts a comma delimited -list, e.g. -``` -PGO_FEATURE_GATES="FeatureName=true,FeatureName2=true,FeatureName3=true" -``` - -While `PGO_FEATURE_GATES` does not have to be set, please note that the features -must be defined before use, otherwise PGO deployment will fail with the -following message -`panic: unable to parse and store configured feature gates. unrecognized feature gate` - -Also, the features must have boolean values, otherwise you will see -`panic: unable to parse and store configured feature gates. invalid value` - -When dealing with tests that do not invoke `cmd/postgres-operator/main.go`, keep -in mind that you will need to ensure that you invoke the `AddAndSetFeatureGates` -function. Otherwise, any test that references the undefined feature gate will fail -with a panic message similar to -"feature "FeatureName" is not registered in FeatureGate" - -To correct for this, you simply need a line similar to -``` -err := util.AddAndSetFeatureGates("") -``` diff --git a/internal/util/features.go b/internal/util/features.go deleted file mode 100644 index d266a3d76b..0000000000 --- a/internal/util/features.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - Copyright 2017 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package util - -import ( - "fmt" - - "k8s.io/component-base/featuregate" -) - -const ( - // Every feature gate should add a key here following this template: - // - // // Enables FeatureName... - // FeatureName featuregate.Feature = "FeatureName" - // - // - https://releases.k8s.io/v1.20.0/pkg/features/kube_features.go#L27 - // - // Feature gates should be listed in alphabetical, case-sensitive - // (upper before any lower case character) order. - // - // Enables support of appending custom queries to default PGMonitor queries - AppendCustomQueries featuregate.Feature = "AppendCustomQueries" - // - BridgeIdentifiers featuregate.Feature = "BridgeIdentifiers" - // - // Enables support of custom sidecars for PostgreSQL instance Pods - InstanceSidecars featuregate.Feature = "InstanceSidecars" - // - // Enables support of custom sidecars for pgBouncer Pods - PGBouncerSidecars featuregate.Feature = "PGBouncerSidecars" - // - // Enables support of tablespace volumes - TablespaceVolumes featuregate.Feature = "TablespaceVolumes" -) - -// pgoFeatures consists of all known PGO feature keys. -// To add a new feature, define a key for it above and add it here. -// An example entry is as follows: -// -// FeatureName: {Default: false, PreRelease: featuregate.Alpha}, -// -// - https://releases.k8s.io/v1.20.0/pkg/features/kube_features.go#L729-732 -var pgoFeatures = map[featuregate.Feature]featuregate.FeatureSpec{ - AppendCustomQueries: {Default: false, PreRelease: featuregate.Alpha}, - BridgeIdentifiers: {Default: false, PreRelease: featuregate.Alpha}, - InstanceSidecars: {Default: false, PreRelease: featuregate.Alpha}, - PGBouncerSidecars: {Default: false, PreRelease: featuregate.Alpha}, - TablespaceVolumes: {Default: false, PreRelease: featuregate.Alpha}, -} - -// DefaultMutableFeatureGate is a mutable, shared global FeatureGate. -// It is used to indicate whether a given feature is enabled or not. -// -// - https://pkg.go.dev/k8s.io/apiserver/pkg/util/feature -// - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go#L24-L28 -var DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate() - -// AddAndSetFeatureGates utilizes the Kubernetes feature gate packages to first -// add the default PGO features to the featureGate and then set the values provided -// via the 'PGO_FEATURE_GATES' environment variable. This function expects a string -// like feature1=true,feature2=false,... -// -// - https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ -// - https://pkg.go.dev/k8s.io/component-base@v0.20.1/featuregate -func AddAndSetFeatureGates(features string) error { - // Add PGO features to the featureGate - // - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/component-base/featuregate/feature_gate.go#L110-L111 - if err := DefaultMutableFeatureGate.Add(pgoFeatures); err != nil { - return fmt.Errorf("unable to add PGO features to the featureGate. 
%w", err) - } - - // Set the feature gates from environment variable config - // - https://releases.k8s.io/v1.20.0/staging/src/k8s.io/component-base/featuregate/feature_gate.go#L105-L107 - if err := DefaultMutableFeatureGate.Set(features); err != nil { - return fmt.Errorf("unable to parse and store configured feature gates. %w", err) - } - return nil -} diff --git a/internal/util/features_test.go b/internal/util/features_test.go deleted file mode 100644 index 4fa7c34274..0000000000 --- a/internal/util/features_test.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - Copyright 2017 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package util - -import ( - "testing" - - "gotest.tools/v3/assert" - "k8s.io/component-base/featuregate" -) - -func TestAddAndSetFeatureGates(t *testing.T) { - - // set test features - const TestGate1 featuregate.Feature = "TestGate1" - const TestGate2 featuregate.Feature = "TestGate2" - const TestGate3 featuregate.Feature = "TestGate3" - - pgoFeatures = map[featuregate.Feature]featuregate.FeatureSpec{ - TestGate1: {Default: false, PreRelease: featuregate.Beta}, - TestGate2: {Default: false, PreRelease: featuregate.Beta}, - TestGate3: {Default: false, PreRelease: featuregate.Beta}, - } - - t.Run("No feature gates set", func(t *testing.T) { - err := AddAndSetFeatureGates("") - assert.NilError(t, err) - }) - - t.Run("One feature gate set", func(t *testing.T) { - err := AddAndSetFeatureGates("TestGate1=true") - assert.NilError(t, err) - }) - - t.Run("Two feature gates set", func(t *testing.T) { - err := AddAndSetFeatureGates("TestGate1=true,TestGate3=true") - assert.NilError(t, err) - }) - - t.Run("All available feature gates set", func(t *testing.T) { - err := AddAndSetFeatureGates("TestGate1=true,TestGate2=true,TestGate3=true") - assert.NilError(t, err) - }) - - t.Run("One unrecognized gate set", func(t *testing.T) { - err := AddAndSetFeatureGates("NotAGate=true") - assert.ErrorContains(t, err, "unrecognized feature gate: NotAGate") - }) - - t.Run("One recognized gate, one unrecognized gate", func(t *testing.T) { - err := AddAndSetFeatureGates("TestGate1=true,NotAGate=true") - assert.ErrorContains(t, err, "unrecognized feature gate: NotAGate") - }) - - t.Run("Gate value not set", func(t *testing.T) { - err := AddAndSetFeatureGates("GateNotSet") - assert.ErrorContains(t, err, "missing bool value for GateNotSet") - }) - - t.Run("Gate value not boolean", func(t *testing.T) { - err := AddAndSetFeatureGates("GateNotSet=foo") - assert.ErrorContains(t, err, "invalid value of GateNotSet=foo, err: strconv.ParseBool") - }) -} diff --git a/internal/util/secrets.go b/internal/util/secrets.go index 203f6bcfea..82768c9386 100644 --- a/internal/util/secrets.go +++ b/internal/util/secrets.go @@ -1,17 +1,6 @@ -/* - Copyright 2017 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
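The deleted README and util files above documented the old global featuregate flow (AddAndSetFeatureGates and DefaultMutableFeatureGate). Its replacement is the context-scoped internal/feature package used throughout this diff; the sketch below shows that pattern, built only from calls that appear in the other hunks (NewGate, SetFromMap, NewContext, Enabled, ShowGates).

```go
package main

import (
	"context"
	"fmt"

	"github.com/crunchydata/postgres-operator/internal/feature"
)

func main() {
	// A mutable gate is created once, then configured; SetFromMap is how the
	// tests in this diff enable individual gates.
	gate := feature.NewGate()
	if err := gate.SetFromMap(map[string]bool{
		feature.TablespaceVolumes: true,
	}); err != nil {
		panic(err)
	}

	// Gates travel on the context instead of through a mutable global.
	ctx := feature.NewContext(context.Background(), gate)

	fmt.Println(feature.Enabled(ctx, feature.TablespaceVolumes)) // true
	fmt.Println(feature.ShowGates(ctx))                          // TablespaceVolumes=true
}
```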
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package util diff --git a/internal/util/secrets_test.go b/internal/util/secrets_test.go index 452c697477..5d549ca89e 100644 --- a/internal/util/secrets_test.go +++ b/internal/util/secrets_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package util @@ -65,7 +54,7 @@ func TestGenerateAlphaNumericPassword(t *testing.T) { assert.Assert(t, cmp.Regexp(`^[A-Za-z0-9]*$`, password)) } - previous := sets.String{} + previous := sets.Set[string]{} for i := 0; i < 10; i++ { password, err := GenerateAlphaNumericPassword(5) @@ -90,7 +79,7 @@ func TestGenerateASCIIPassword(t *testing.T) { } } - previous := sets.String{} + previous := sets.Set[string]{} for i := 0; i < 10; i++ { password, err := GenerateASCIIPassword(5) diff --git a/internal/util/util.go b/internal/util/util.go index 2199b584fd..72634ebbc6 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -1,17 +1,6 @@ -/* - Copyright 2017 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package util diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go index c72ca07471..0b94a4dae1 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
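The secrets tests above switch from the deprecated sets.String to the generic sets.Set[string] from k8s.io/apimachinery. A minimal sketch of the calls involved follows; the values are placeholders.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// sets.Set[string]{} is an empty, non-nil set, so Insert works directly.
	previous := sets.Set[string]{}
	previous.Insert("generated-password-1")

	fmt.Println(previous.Has("generated-password-1")) // true
	fmt.Println(previous.Has("generated-password-2")) // false
}
```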
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -34,7 +23,7 @@ type CrunchyBridgeClusterSpec struct { // Whether the cluster is protected. Protected clusters can't be destroyed until // their protected flag is removed - // +optional + // +kubebuilder:validation:Optional IsProtected bool `json:"isProtected,omitempty"` // The name of the cluster @@ -53,10 +42,10 @@ type CrunchyBridgeClusterSpec struct { Plan string `json:"plan"` // The ID of the cluster's major Postgres version. - // Currently Bridge offers 13-16 + // Currently Bridge offers 13-17 // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=13 - // +kubebuilder:validation:Maximum=16 + // +kubebuilder:validation:Maximum=17 // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 PostgresVersion int `json:"majorVersion"` @@ -76,14 +65,14 @@ type CrunchyBridgeClusterSpec struct { // are retrieved from the Bridge API. An empty list creates no role secrets. // Removing a role from this list does NOT drop the role nor revoke their // access, but it will delete that role's secret from the kube cluster. + // +kubebuilder:validation:Optional // +listType=map // +listMapKey=name - // +optional Roles []*CrunchyBridgeClusterRoleSpec `json:"roles,omitempty"` // The name of the secret containing the API key and team id // +kubebuilder:validation:Required - Secret string `json:"secret,omitempty"` + Secret string `json:"secret"` // The amount of storage available to the cluster in gigabytes. // The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) to match Kubernetes conventions. @@ -97,9 +86,11 @@ type CrunchyBridgeClusterSpec struct { type CrunchyBridgeClusterRoleSpec struct { // Name of the role within Crunchy Bridge. // More info: https://docs.crunchybridge.com/concepts/users + // +kubebuilder:validation:Required Name string `json:"name"` // The name of the Secret that will hold the role credentials. + // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Type=string diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go index 0c8e247bbd..15773a1815 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Package v1beta1 contains API Schema definitions for the postgres-operator v1beta1 API group // +kubebuilder:object:generate=true diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go index 111c4fb805..2f01399372 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go index 6f83b713c9..06c7321bc4 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go index 9aef438408..3e3098a602 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -60,15 +49,15 @@ type PGBackRestJobStatus struct { type PGBackRestScheduledBackupStatus struct { // The name of the associated pgBackRest scheduled backup CronJob - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional CronJobName string `json:"cronJobName,omitempty"` // The name of the associated pgBackRest repository - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional RepoName string `json:"repo,omitempty"` // The pgBackRest backup type for this Job - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional Type string `json:"type,omitempty"` // Represents the time the manual backup Job was acknowledged by the Job controller. @@ -353,7 +342,20 @@ type RepoHostStatus struct { type RepoPVC struct { // Defines a PersistentVolumeClaim spec used to create and/or bind a volume + // --- // +kubebuilder:validation:Required + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` VolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"volumeClaimSpec"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go index 38a4eebd2d..e940a9300d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go index 000ea72ba0..f2cd78335a 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go index 1b221abe5f..8e99f8239f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go @@ -1,16 +1,6 @@ // Copyright 2021 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -58,8 +48,8 @@ type PGUpgradeSpec struct { // The major version of PostgreSQL before the upgrade. // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=10 - // +kubebuilder:validation:Maximum=16 + // +kubebuilder:validation:Minimum=11 + // +kubebuilder:validation:Maximum=17 FromPostgresVersion int `json:"fromPostgresVersion"` // TODO(benjaminjb): define webhook validation to make sure @@ -69,8 +59,8 @@ type PGUpgradeSpec struct { // The major version of PostgreSQL to be upgraded to. // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=10 - // +kubebuilder:validation:Maximum=16 + // +kubebuilder:validation:Minimum=11 + // +kubebuilder:validation:Maximum=17 ToPostgresVersion int `json:"toPostgresVersion"` // The image name to use for PostgreSQL containers after upgrade. diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go index ff792ea986..b7baa72942 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go index bfb8892ed4..83396902d0 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 4200e5853a..54e42baa3b 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 @@ -33,8 +22,8 @@ type PostgresClusterSpec struct { DataSource *DataSource `json:"dataSource,omitempty"` // PostgreSQL backup configuration - // +kubebuilder:validation:Required - Backups Backups `json:"backups"` + // +optional + Backups Backups `json:"backups,omitempty"` // The secret containing the Certificates and Keys to encrypt PostgreSQL // traffic will need to contain the server TLS certificate, TLS key and the @@ -122,8 +111,8 @@ type PostgresClusterSpec struct { // The major version of PostgreSQL installed in the PostgreSQL image // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=10 - // +kubebuilder:validation:Maximum=16 + // +kubebuilder:validation:Minimum=11 + // +kubebuilder:validation:Maximum=17 // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 PostgresVersion int `json:"postgresVersion"` @@ -166,7 +155,17 @@ type PostgresClusterSpec struct { // A list of group IDs applied to the process of a container. These can be // useful when accessing shared file systems with constrained permissions. // More info: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context - // +optional + // --- + // +kubebuilder:validation:Optional + // + // Containers should not run with a root GID. 
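+	// (GID 0 is the root group, so the items Minimum of 1 below keeps it out of this list.)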
+ // - https://kubernetes.io/docs/concepts/security/pod-security-standards/ + // +kubebuilder:validation:items:Minimum=1 + // + // Supplementary GIDs must fit within int32. + // - https://releases.k8s.io/v1.18.0/pkg/apis/core/validation/validation.go#L3659-L3663 + // - https://releases.k8s.io/v1.22.0/pkg/apis/core/validation/validation.go#L3923-L3927 + // +kubebuilder:validation:items:Maximum=2147483647 SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` // Users to create inside PostgreSQL and the databases they should access. @@ -322,8 +321,12 @@ func (s *PostgresClusterSpec) Default() { type Backups struct { // pgBackRest archive configuration - // +kubebuilder:validation:Required + // +optional PGBackRest PGBackRestArchive `json:"pgbackrest"` + + // VolumeSnapshot configuration + // +optional + Snapshots *VolumeSnapshots `json:"snapshots,omitempty"` } // PostgresClusterStatus defines the observed state of PostgresCluster @@ -447,7 +450,20 @@ type PostgresInstanceSetSpec struct { // Defines a PersistentVolumeClaim for PostgreSQL data. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + // --- // +kubebuilder:validation:Required + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` DataVolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"dataVolumeClaimSpec"` // Priority class name for the PostgreSQL pod. Changing this value causes @@ -488,7 +504,20 @@ type PostgresInstanceSetSpec struct { // Defines a separate PersistentVolumeClaim for PostgreSQL's write-ahead log. // More info: https://www.postgresql.org/docs/current/wal.html - // +optional + // --- + // +kubebuilder:validation:Optional + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. 
NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` WALVolumeClaimSpec *corev1.PersistentVolumeClaimSpec `json:"walVolumeClaimSpec,omitempty"` // The list of tablespaces volumes to mount for this postgrescluster @@ -517,7 +546,20 @@ type TablespaceVolume struct { // Defines a PersistentVolumeClaim for a tablespace. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + // --- // +kubebuilder:validation:Required + // + // NOTE(validation): Every PVC must have at least one accessMode. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.accessModes`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2098-L2100 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2292-L2294 + // +kubebuilder:validation:XValidation:rule=`has(self.accessModes) && size(self.accessModes) > 0`,message=`missing accessModes` + // + // NOTE(validation): Every PVC must have a positive storage request. NOTE(KEP-4153) + // TODO(k8s-1.28): fieldPath=`.resources.requests.storage`,reason="FieldValueRequired" + // - https://releases.k8s.io/v1.25.0/pkg/apis/core/validation/validation.go#L2126-L2133 + // - https://releases.k8s.io/v1.31.0/pkg/apis/core/validation/validation.go#L2318-L2325 + // +kubebuilder:validation:XValidation:rule=`has(self.resources) && has(self.resources.requests) && has(self.resources.requests.storage)`,message=`missing storage request` DataVolumeClaimSpec corev1.PersistentVolumeClaimSpec `json:"dataVolumeClaimSpec"` } @@ -554,6 +596,10 @@ type PostgresInstanceSetStatus struct { // Total number of pods that have the desired specification. // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` + + // Desired Size of the pgData volume + // +optional + DesiredPGDataVolume map[string]string `json:"desiredPGDataVolume,omitempty"` } // PostgresProxySpec is a union of the supported PostgreSQL proxies. @@ -692,3 +738,11 @@ func NewPostgresCluster() *PostgresCluster { cluster.SetGroupVersionKind(GroupVersion.WithKind("PostgresCluster")) return cluster } + +// VolumeSnapshots defines the configuration for VolumeSnapshots +type VolumeSnapshots struct { + // Name of the VolumeSnapshotClass that should be used by VolumeSnapshots + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + VolumeSnapshotClassName string `json:"volumeSnapshotClassName"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index d34316123d..1dc4e3627e 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -1,17 +1,6 @@ -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go index cc5749e9ec..96cd4da073 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go @@ -1,17 +1,6 @@ -/* - Copyright 2022 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go index 9b64476b64..4fbc90a3b9 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go @@ -1,16 +1,6 @@ // Copyright 2023 - 2024 Crunchy Data Solutions, Inc. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package v1beta1 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index 69562e1cc0..fa32069d0f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -1,20 +1,8 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated -/* - Copyright 2021 - 2024 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 // Code generated by controller-gen. DO NOT EDIT. @@ -87,6 +75,11 @@ func (in *BackupJobs) DeepCopy() *BackupJobs { func (in *Backups) DeepCopyInto(out *Backups) { *out = *in in.PGBackRest.DeepCopyInto(&out.PGBackRest) + if in.Snapshots != nil { + in, out := &in.Snapshots, &out.Snapshots + *out = new(VolumeSnapshots) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backups. @@ -1786,7 +1779,9 @@ func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { if in.InstanceSets != nil { in, out := &in.InstanceSets, &out.InstanceSets *out = make([]PostgresInstanceSetStatus, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } in.Patroni.DeepCopyInto(&out.Patroni) if in.PGBackRest != nil { @@ -1913,6 +1908,13 @@ func (in *PostgresInstanceSetSpec) DeepCopy() *PostgresInstanceSetSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresInstanceSetStatus) DeepCopyInto(out *PostgresInstanceSetStatus) { *out = *in + if in.DesiredPGDataVolume != nil { + in, out := &in.DesiredPGDataVolume, &out.DesiredPGDataVolume + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresInstanceSetStatus. @@ -2184,12 +2186,12 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.InternalTrafficPolicy != nil { in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(corev1.ServiceInternalTrafficPolicyType) + *out = new(corev1.ServiceInternalTrafficPolicy) **out = **in } if in.ExternalTrafficPolicy != nil { in, out := &in.ExternalTrafficPolicy, &out.ExternalTrafficPolicy - *out = new(corev1.ServiceExternalTrafficPolicyType) + *out = new(corev1.ServiceExternalTrafficPolicy) **out = **in } } @@ -2308,3 +2310,18 @@ func (in *UserInterfaceSpec) DeepCopy() *UserInterfaceSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshots) DeepCopyInto(out *VolumeSnapshots) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshots. 
+func (in *VolumeSnapshots) DeepCopy() *VolumeSnapshots { + if in == nil { + return nil + } + out := new(VolumeSnapshots) + in.DeepCopyInto(out) + return out +} diff --git a/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml new file mode 100644 index 0000000000..b4372b75e7 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml @@ -0,0 +1,7 @@ +# Ensure that the default StorageClass supports VolumeExpansion +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" +allowVolumeExpansion: true diff --git a/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml b/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml new file mode 100644 index 0000000000..fc947a538f --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/01-create-cluster.yaml +assert: +- files/01-cluster-and-pvc-created.yaml diff --git a/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml b/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml new file mode 100644 index 0000000000..261c274a51 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/02-create-data.yaml +assert: +- files/02-create-data-completed.yaml diff --git a/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml new file mode 100644 index 0000000000..ad31b61401 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml @@ -0,0 +1,12 @@ +--- +# Check that annotation is set +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: auto-grow-volume + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/patroni: auto-grow-volume-ha + annotations: + suggested-pgdata-pvc-size: 1461Mi diff --git a/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml new file mode 100644 index 0000000000..d486f9de18 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml @@ -0,0 +1,19 @@ +# We know that the PVC sizes have changed so now we can check that they have been +# updated to have the expected size +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: auto-grow-volume + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 1461Mi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + phase: Bound diff --git a/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml b/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml new file mode 100644 index 0000000000..475177d242 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # Verify expected event has occurred + - script: | + EVENT=$( + kubectl get events --namespace="${NAMESPACE}" \ + --field-selector reason="VolumeAutoGrow" --output=jsonpath={.items..message} + ) + + if [[ "${EVENT}" != "pgData volume expansion to 1461Mi requested for auto-grow-volume/instance1." 
]]; then exit 1; fi diff --git a/testing/kuttl/e2e-other/autogrow-volume/README.md b/testing/kuttl/e2e-other/autogrow-volume/README.md new file mode 100644 index 0000000000..674bc69b40 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/README.md @@ -0,0 +1,9 @@ +### AutoGrow Volume + +* 00: Assert the storage class allows volume expansion +* 01: Create and verify PostgresCluster and PVC +* 02: Add data to trigger growth and verify Job completes +* 03: Verify annotation on the instance Pod +* 04: Verify the PVC request has been set and the PVC has grown +* 05: Verify the expansion request Event has been created + Note: This Event should be created between steps 03 and 04 but is checked at the end for timing purposes. diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml new file mode 100644 index 0000000000..17804b8205 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml @@ -0,0 +1,27 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: auto-grow-volume +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: auto-grow-volume + postgres-operator.crunchydata.com/instance-set: instance1 +spec: + resources: + requests: + storage: 1Gi +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml new file mode 100644 index 0000000000..01eaf7a684 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml @@ -0,0 +1,27 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: auto-grow-volume +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + limits: + storage: 2Gi + backups: + pgbackrest: + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/17-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml similarity index 62% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/17-assert.yaml rename to testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml index 56289c35c1..fdb42e68f5 100644 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/17-assert.yaml +++ b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml @@ -2,6 +2,6 @@ apiVersion: batch/v1 kind: Job metadata: - name: major-upgrade-empty-image-after + name: create-data status: succeeded: 1 diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml new file mode 100644 index 0000000000..c42f0dec10 --- /dev/null +++ b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml @@ -0,0 +1,32 @@ +--- +# Create some data that should be present after resizing. 
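+# The psql Job below writes enough rows via generate_series to push pgData usage
+# past roughly 75%, which should surface the suggested-pgdata-pvc-size annotation
+# (03-assert.yaml) and the VolumeAutoGrow event checked in 05-check-event.yaml.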
+apiVersion: batch/v1 +kind: Job +metadata: + name: create-data + labels: { postgres-operator-test: kuttl } +spec: + backoffLimit: 3 + template: + metadata: + labels: { postgres-operator-test: kuttl } + spec: + restartPolicy: Never + containers: + - name: psql + image: ${KUTTL_PSQL_IMAGE} + env: + - name: PGURI + valueFrom: { secretKeyRef: { name: auto-grow-volume-pguser-auto-grow-volume, key: uri } } + + # Do not wait indefinitely, but leave enough time to create the data. + - { name: PGCONNECT_TIMEOUT, value: '60' } + + command: + - psql + - $(PGURI) + - --set=ON_ERROR_STOP=1 + - --command + - | # create schema for user and add enough data to get over 75% usage + CREATE SCHEMA "auto-grow-volume" AUTHORIZATION "auto-grow-volume"; + CREATE TABLE big_table AS SELECT 'data' || s AS mydata FROM generate_series(1,6000000) AS s; diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml deleted file mode 100644 index fa3985231d..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# This upgrade is valid, but has no pgcluster to work on and should get that condition -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -spec: - # postgres version that is no longer available - fromPostgresVersion: 10 - toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} - postgresClusterName: major-upgrade-empty-image diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/01-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/01-assert.yaml deleted file mode 100644 index b7d0f936fb..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/01-assert.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -status: - conditions: - - type: "Progressing" - status: "False" - reason: "PGClusterNotFound" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml deleted file mode 100644 index c85a9b8dae..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Create the cluster we will do an actual upgrade on, but set the postgres version -# to '10' to force a missing image scenario -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: major-upgrade-empty-image -spec: - # postgres version that is no longer available - postgresVersion: 10 - patroni: - dynamicConfiguration: - postgresql: - parameters: - shared_preload_libraries: pgaudit, set_user, pg_stat_statements, pgnodemx, pg_cron - instances: - - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/10-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/10-assert.yaml deleted file mode 100644 index 72e9ff6387..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/10-assert.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# The cluster is not running due to the missing image, not due to a proper -# shutdown status. 
-apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -status: - conditions: - - type: "Progressing" - status: "False" - reason: "PGClusterNotShutdown" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml deleted file mode 100644 index 316f3a5472..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# Shutdown the cluster -- but without the annotation. -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: major-upgrade-empty-image -spec: - shutdown: true diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/11-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/11-assert.yaml deleted file mode 100644 index 5bd9d447cb..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/11-assert.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# Since the cluster is missing the annotation, we get this condition -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -status: - conditions: - - type: "Progressing" - status: "False" - reason: "PGClusterPrimaryNotIdentified" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml deleted file mode 100644 index fcdf4f62e3..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# Update the postgres version and restart the cluster. 
-apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: major-upgrade-empty-image -spec: - shutdown: false - postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -spec: - # update postgres version - fromPostgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/12-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/12-assert.yaml deleted file mode 100644 index 14c33cccfe..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/12-assert.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -# Wait for the instances to be ready and the replica backup to complete -# by waiting for the status to signal pods ready and pgbackrest stanza created -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: major-upgrade-empty-image -spec: - postgresVersion: ${KUTTL_PG_UPGRADE_FROM_VERSION} -status: - instances: - - name: '00' - replicas: 1 - readyReplicas: 1 - updatedReplicas: 1 - pgbackrest: - repos: - - name: repo1 - replicaCreateBackupComplete: true - stanzaCreated: true ---- -# Even when the cluster exists, the pgupgrade is not progressing because the cluster is not shutdown -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -status: - conditions: - - type: "Progressing" - status: "False" - reason: "PGClusterNotShutdown" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml deleted file mode 100644 index 316f3a5472..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# Shutdown the cluster -- but without the annotation. -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: major-upgrade-empty-image -spec: - shutdown: true diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/13-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/13-assert.yaml deleted file mode 100644 index 78e51e566a..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/13-assert.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# Since the cluster is missing the annotation, we get this condition -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -status: - conditions: - - type: "Progressing" - status: "False" - reason: "PGClusterMissingRequiredAnnotation" diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml deleted file mode 100644 index 2fa2c949a9..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# Annotate the cluster for an upgrade. 
-apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: major-upgrade-empty-image - annotations: - postgres-operator.crunchydata.com/allow-upgrade: empty-image-upgrade diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/14-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/14-assert.yaml deleted file mode 100644 index bd828180f4..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/14-assert.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# Now that the postgres cluster is shut down and annotated, the pgupgrade -# can finish reconciling. We know the reconciliation is complete when -# the pgupgrade status is succeeded and the postgres cluster status -# has the updated version. -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: empty-image-upgrade -status: - conditions: - - type: "Progressing" - status: "False" - - type: "Succeeded" - status: "True" ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: major-upgrade-empty-image -status: - postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml deleted file mode 100644 index e5f270fb2f..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# Once the pgupgrade is finished, update the version and set shutdown to false -# in the postgres cluster -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: major-upgrade-empty-image -spec: - postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} - shutdown: false diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/15-assert.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/15-assert.yaml deleted file mode 100644 index dfcbd4c819..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/15-assert.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -# Wait for the instances to be ready with the target Postgres version. 
-apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: major-upgrade-empty-image -status: - postgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} - instances: - - name: '00' - replicas: 1 - readyReplicas: 1 - updatedReplicas: 1 - pgbackrest: - repos: - - name: repo1 - replicaCreateBackupComplete: true - stanzaCreated: true diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/16-check-pgbackrest.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/16-check-pgbackrest.yaml deleted file mode 100644 index 969e7f0ac3..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/16-check-pgbackrest.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: -# Check that the pgbackrest setup has successfully completed -- script: | - kubectl -n "${NAMESPACE}" exec "statefulset.apps/major-upgrade-empty-image-repo-host" -c pgbackrest -- pgbackrest check --stanza=db diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml deleted file mode 100644 index 5315c1d14f..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -# Check the version reported by PostgreSQL -apiVersion: batch/v1 -kind: Job -metadata: - name: major-upgrade-empty-image-after - labels: { postgres-operator-test: kuttl } -spec: - backoffLimit: 6 - template: - metadata: - labels: { postgres-operator-test: kuttl } - spec: - restartPolicy: Never - containers: - - name: psql - image: ${KUTTL_PSQL_IMAGE} - env: - - name: PGURI - valueFrom: { secretKeyRef: { name: major-upgrade-empty-image-pguser-major-upgrade-empty-image, key: uri } } - - # Do not wait indefinitely. - - { name: PGCONNECT_TIMEOUT, value: '5' } - - # Note: the `$$$$` is reduced to `$$` by Kubernetes. - # - https://kubernetes.io/docs/tasks/inject-data-application/ - command: - - psql - - $(PGURI) - - --quiet - - --echo-errors - - --set=ON_ERROR_STOP=1 - - --command - - | - DO $$$$ - BEGIN - ASSERT current_setting('server_version_num') LIKE '${KUTTL_PG_UPGRADE_TO_VERSION}%', - format('got %L', current_setting('server_version_num')); - END $$$$; diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/README.md b/testing/kuttl/e2e-other/major-upgrade-missing-image/README.md deleted file mode 100644 index 341cc854f7..0000000000 --- a/testing/kuttl/e2e-other/major-upgrade-missing-image/README.md +++ /dev/null @@ -1,36 +0,0 @@ -## Major upgrade missing image tests - -This is a variation derived from our major upgrade KUTTL tests designed to -test scenarios where required container images are not defined in either the -PostgresCluster spec or via the RELATED_IMAGES environment variables. - -### Basic PGUpgrade controller and CRD instance validation - -* 01--valid-upgrade: create a valid PGUpgrade instance -* 01-assert: check that the PGUpgrade instance exists and has the expected status - -### Verify new statuses for missing required container images - -* 10--cluster: create the cluster with an unavailable image (i.e. 
Postgres 10) -* 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" -* 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade -* 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" - -### Update to an available Postgres version, start and upgrade PostgresCluster - -* 12--start-and-update-version: update the Postgres version on both CRD instances and set 'shutdown' to false -* 12-assert: verify that the cluster is running and the PGUpgrade instance now has the new status info with reason: "PGClusterNotShutdown" -* 13--shutdown-cluster: set spec.shutdown to 'true' -* 13-assert: check that the PGUpgrade instance has the expected reason: "PGClusterMissingRequiredAnnotation" -* 14--annotate-cluster: set the required annotation -* 14-assert: verify that the upgrade succeeded and the new Postgres version shows in the cluster's status -* 15--start-cluster: set the new Postgres version and spec.shutdown to 'false' - -### Verify upgraded PostgresCluster - -* 15-assert: verify that the cluster is running -* 16-check-pgbackrest: check that the pgbackrest setup has successfully completed -* 17--check-version: check the version reported by PostgreSQL -* 17-assert: assert the Job from the previous step succeeded - - diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/03-assert.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-user-management/03-assert.yaml deleted file mode 100644 index d3941893f2..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/03-assert.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User -- script: | - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - - bob_role=$(jq '.[] | select(.username=="bob@example.com") | .role' <<< $users_in_pgadmin) - dave_role=$(jq '.[] | select(.username=="dave@example.com") | .role' <<< $users_in_pgadmin) - jimi_role=$(jq '.[] | select(.username=="jimi@example.com") | .role' <<< $users_in_pgadmin) - - [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 - - users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) - - bob_is_admin=$(jq '.[] | select(.username=="bob@example.com") | .isAdmin' <<< $users_in_secret) - dave_is_admin=$(jq '.[] | select(.username=="dave@example.com") | .isAdmin' <<< $users_in_secret) - jimi_is_admin=$(jq '.[] | select(.username=="jimi@example.com") | .isAdmin' <<< $users_in_secret) - - $bob_is_admin && $dave_is_admin && ! 
$jimi_is_admin || exit 1 - - bob_password=$(jq -r '.[] | select(.username=="bob@example.com") | .password' <<< $users_in_secret) - dave_password=$(jq -r '.[] | select(.username=="dave@example.com") | .password' <<< $users_in_secret) - jimi_password=$(jq -r '.[] | select(.username=="jimi@example.com") | .password' <<< $users_in_secret) - - [ "$bob_password" = "password123" ] && [ "$dave_password" = "password456" ] && [ "$jimi_password" = "password789" ] || exit 1 diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/05-assert.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-user-management/05-assert.yaml deleted file mode 100644 index 89013440c2..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/05-assert.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User -- script: | - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - - bob_role=$(jq '.[] | select(.username=="bob@example.com") | .role' <<< $users_in_pgadmin) - dave_role=$(jq '.[] | select(.username=="dave@example.com") | .role' <<< $users_in_pgadmin) - jimi_role=$(jq '.[] | select(.username=="jimi@example.com") | .role' <<< $users_in_pgadmin) - - [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 - - users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) - - bob_is_admin=$(jq '.[] | select(.username=="bob@example.com") | .isAdmin' <<< $users_in_secret) - dave_is_admin=$(jq '.[] | select(.username=="dave@example.com") | .isAdmin' <<< $users_in_secret) - jimi_is_admin=$(jq '.[] | select(.username=="jimi@example.com") | .isAdmin' <<< $users_in_secret) - - $bob_is_admin && $dave_is_admin && ! 
$jimi_is_admin || exit 1 - - bob_password=$(jq -r '.[] | select(.username=="bob@example.com") | .password' <<< $users_in_secret) - dave_password=$(jq -r '.[] | select(.username=="dave@example.com") | .password' <<< $users_in_secret) - jimi_password=$(jq -r '.[] | select(.username=="jimi@example.com") | .password' <<< $users_in_secret) - - [ "$bob_password" = "NEWpassword123" ] && [ "$dave_password" = "NEWpassword456" ] && [ "$jimi_password" = "NEWpassword789" ] || exit 1 diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/00--create-pgadmin.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/00--create-pgadmin.yaml deleted file mode 100644 index ee1a03ec64..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/00--create-pgadmin.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/00-pgadmin.yaml -assert: -- files/00-pgadmin-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/01-assert.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/01-assert.yaml deleted file mode 100644 index 6b7c8c8794..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/01-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -- script: | - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") - - clusters_expected="\"Servers\": {}" - { - contains "${clusters_actual}" "${clusters_expected}" - } || { - echo "Wrong servers dumped: got ${clusters_actual}" - exit 1 - } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/02--create-cluster.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/02--create-cluster.yaml deleted file mode 100644 index bee91ce0a4..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/02--create-cluster.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/02-cluster.yaml -- files/02-pgadmin.yaml -assert: -- files/02-cluster-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/03-assert.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/03-assert.yaml deleted file mode 100644 index 169a8261eb..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/03-assert.yaml +++ /dev/null @@ -1,76 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -# Check the configmap is updated; -# Check the file is updated on the pod; -# Check the server dump is accurate. -# Because we have to wait for the configmap reload, make sure we have enough time. 
-timeout: 120 -commands: -- script: | - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } - - data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n }\n }\n}\n"' - - data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) - - { - contains "${data_actual}" "${data_expected}" - } || { - echo "Wrong configmap: got ${data_actual}" - exit 1 - } - - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') - config_expected='"Servers": { - "1": { - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin1", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin1" - } - }' - { - contains "${config_updated}" "${config_expected}" - } || { - echo "Wrong file mounted: got ${config_updated}" - echo "Wrong file mounted: expected ${config_expected}" - sleep 10 - exit 1 - } - - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") - - clusters_expected=' - { - "Servers": { - "1": { - "Name": "pgadmin1", - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin1", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - } - } - }' - { - contains "${clusters_actual}" "${clusters_expected}" - } || { - echo "Wrong servers dumped: got ${clusters_actual}" - echo "Wrong servers dumped: expected ${clusters_expected}" - diff_comp "${clusters_actual}" "${clusters_expected}" - exit 1 - } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/04--create-cluster.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/04--create-cluster.yaml deleted file mode 100644 index 5701678501..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/04--create-cluster.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/04-cluster.yaml -assert: -- files/04-cluster-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/05-assert.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/05-assert.yaml deleted file mode 100644 index 7fe5b69dc2..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/05-assert.yaml +++ /dev/null @@ -1,102 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -# Check the configmap is updated; -# Check the file is updated on the pod; -# Check the server dump is accurate. -# Because we have to wait for the configmap reload, make sure we have enough time. 
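The `contains` and `diff_comp` one-liners that open each of these scripts pass the caller's arguments straight into an inline `bash -c` program, with the lone `-` filling `$0`; `contains` then performs a glob substring match and `diff_comp` diffs two strings. A minimal, kuttl-free demonstration of the same helpers on made-up sample strings:

```bash
#!/usr/bin/env bash
# The two helpers used throughout these assert scripts, demonstrated on
# hard-coded strings instead of live kubectl output.
contains()  { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; }          # haystack, needle
diff_comp() { bash -ceu 'diff <(echo "$1") <(echo "$2")' - "$@"; }

actual='{"Servers": {"1": {"Name": "pgadmin1"}}}'
expected='"Name": "pgadmin1"'

if contains "${actual}" "${expected}"; then
  echo "expected fragment found"
else
  echo "mismatch:" >&2
  diff_comp "${actual}" "${expected}"    # shows how the two strings differ
  exit 1
fi
```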
-timeout: 120 -commands: -- script: | - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } - - data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin2-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin2\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin2\"\n }\n }\n}\n"' - - data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) - - { - contains "${data_actual}" "${data_expected}" - } || { - echo "Wrong configmap: got ${data_actual}" - diff_comp "${data_actual}" "${data_expected}" - exit 1 - } - - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') - config_expected='"Servers": { - "1": { - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin1", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin1" - }, - "2": { - "Group": "groupOne", - "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin2", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin2" - } - }' - { - contains "${config_updated}" "${config_expected}" - } || { - echo "Wrong file mounted: got ${config_updated}" - echo "Wrong file mounted: expected ${config_expected}" - diff_comp "${config_updated}" "${config_expected}" - sleep 10 - exit 1 - } - - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") - - clusters_expected=' - { - "Servers": { - "1": { - "Name": "pgadmin1", - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin1", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - }, - "2": { - "Name": "pgadmin2", - "Group": "groupOne", - "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin2", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - } - } - }' - { - contains "${clusters_actual}" "${clusters_expected}" - } || { - echo "Wrong servers dumped: got ${clusters_actual}" - echo "Wrong servers dumped: expected ${clusters_expected}" - diff_comp "${clusters_actual}" "${clusters_expected}" - exit 1 - } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/06--create-cluster.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/06--create-cluster.yaml deleted file mode 100644 index 86b5f8bf04..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/06--create-cluster.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 
-kind: TestStep -apply: -- files/06-cluster.yaml -- files/06-pgadmin.yaml -assert: -- files/06-cluster-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/07-assert.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/07-assert.yaml deleted file mode 100644 index 323237cad4..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/07-assert.yaml +++ /dev/null @@ -1,126 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -# Check the configmap is updated; -# Check the file is updated on the pod; -# Check the server dump is accurate. -# Because we have to wait for the configmap reload, make sure we have enough time. -timeout: 120 -commands: -- script: | - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } - - data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin2-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin2\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin2\"\n },\n \"3\": {\n \"Group\": \"groupTwo\",\n \"Host\": \"pgadmin3-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin3\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin3\"\n }\n }\n}\n"' - - data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) - - { - contains "${data_actual}" "${data_expected}" - } || { - echo "Wrong configmap: got ${data_actual}" - diff_comp "${data_actual}" "${data_expected}" - exit 1 - } - - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') - config_expected='"Servers": { - "1": { - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin1", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin1" - }, - "2": { - "Group": "groupOne", - "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin2", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin2" - }, - "3": { - "Group": "groupTwo", - "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin3", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin3" - } - }' - { - contains "${config_updated}" "${config_expected}" - } || { - echo "Wrong file mounted: got ${config_updated}" - echo "Wrong file mounted: expected ${config_expected}" - diff_comp "${config_updated}" "${config_expected}" - sleep 10 - exit 1 - } - - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") - - clusters_expected=' - { - "Servers": { - "1": { - "Name": "pgadmin1", - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - 
"Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin1", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - }, - "2": { - "Name": "pgadmin2", - "Group": "groupOne", - "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin2", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - }, - "3": { - "Name": "pgadmin3", - "Group": "groupTwo", - "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin3", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - } - } - }' - { - contains "${clusters_actual}" "${clusters_expected}" - } || { - echo "Wrong servers dumped: got ${clusters_actual}" - echo "Wrong servers dumped: expected ${clusters_expected}" - diff_comp "${clusters_actual}" "${clusters_expected}" - exit 1 - } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/08--delete-cluster.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/08--delete-cluster.yaml deleted file mode 100644 index bc11ea62f4..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/08--delete-cluster.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -delete: - - apiVersion: postgres-operator.crunchydata.com/v1beta1 - kind: PostgresCluster - name: pgadmin2 -error: -- files/04-cluster-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/09-assert.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/09-assert.yaml deleted file mode 100644 index eca5581cb7..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/09-assert.yaml +++ /dev/null @@ -1,102 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -# Check the configmap is updated; -# Check the file is updated on the pod; -# Check the server dump is accurate. -# Because we have to wait for the configmap reload, make sure we have enough time. 
-timeout: 120 -commands: -- script: | - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } - - data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupTwo\",\n \"Host\": \"pgadmin3-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin3\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin3\"\n }\n }\n}\n"' - - data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) - - { - contains "${data_actual}" "${data_expected}" - } || { - echo "Wrong configmap: got ${data_actual}" - diff_comp "${data_actual}" "${data_expected}" - exit 1 - } - - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') - config_expected='"Servers": { - "1": { - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin1", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin1" - }, - "2": { - "Group": "groupTwo", - "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin3", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin3" - } - }' - { - contains "${config_updated}" "${config_expected}" - } || { - echo "Wrong file mounted: got ${config_updated}" - echo "Wrong file mounted: expected ${config_expected}" - diff_comp "${config_updated}" "${config_expected}" - sleep 10 - exit 1 - } - - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") - - clusters_expected=' - { - "Servers": { - "1": { - "Name": "pgadmin1", - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin1", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - }, - "2": { - "Name": "pgadmin3", - "Group": "groupTwo", - "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin3", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - } - } - }' - { - contains "${clusters_actual}" "${clusters_expected}" - } || { - echo "Wrong servers dumped: got ${clusters_actual}" - echo "Wrong servers dumped: expected ${clusters_expected}" - diff_comp "${clusters_actual}" "${clusters_expected}" - exit 1 - } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/README.md b/testing/kuttl/e2e-other/standalone-pgadmin-v8/README.md deleted file mode 100644 index 22bdd71854..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/README.md +++ /dev/null @@ -1,64 +0,0 @@ -** pgAdmin ** - -(This test should replace 
`testing/kuttl/e2e/standalone-pgadmin` once pgAdmin4 v8 is released.) - -Note: due to the (random) namespace being part of the host, we cannot check the configmap using the usual assert/file pattern. - -*Phase one* - -* 00: - * create a pgadmin with no server groups; - * check the correct existence of the secret, configmap, and pod. -* 01: dump the servers from pgAdmin and check that the list is empty. - -*Phase two* - -* 02: - * create a postgrescluster with a label; - * update the pgadmin with a selector; - * check the correct existence of the postgrescluster. -* 03: - * check that the configmap is updated in the pgadmin pod; - * dump the servers from pgAdmin and check that the list has the expected server. - -*Phase three* - -* 04: - * create a postgrescluster with the same label; - * check the correct existence of the postgrescluster. -* 05: - * check that the configmap is updated in the pgadmin pod; - * dump the servers from pgAdmin and check that the list has the expected 2 servers. - -*Phase four* - -* 06: - * create a postgrescluster with the a different label; - * update the pgadmin with a second serverGroup; - * check the correct existence of the postgrescluster. -* 07: - * check that the configmap is updated in the pgadmin pod; - * dump the servers from pgAdmin and check that the list has the expected 3 servers. - -*Phase five* - -* 08: - * delete a postgrescluster; - * update the pgadmin with a second serverGroup; - * check the correct existence of the postgrescluster. -* 09: - * check that the configmap is updated in the pgadmin pod; - * dump the servers from pgAdmin and check that the list has the expected 2 servers - -pgAdmin v7 vs v8 Notes: -pgAdmin v8 includes updates to `setup.py` which alter how the `dump-servers` argument -is called: -- v7: https://github.com/pgadmin-org/pgadmin4/blob/REL-7_8/web/setup.py#L175 -- v8: https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/setup.py#L79 - -You will also notice a difference in the `assert.yaml` files between the stored -config and the config returned by the `dump-servers` command. The additional setting, -`"TunnelPort": "22"`, is due to the new defaulting behavior added to pgAdmin for psycopg3. 
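One way to line the two representations up when eyeballing a failure is to strip the fields pgAdmin adds on export before diffing; the sketch below assumes those are the only differences and reuses the in-pod file paths from the scripts above. The README's reference links for this behavior follow.

```bash
#!/usr/bin/env bash
# Normalize a pgAdmin v8 dump-servers document so it can be diffed against the
# operator-written pgadmin-shared-clusters.json: fold ConnectionParameters.sslmode
# back into SSLMode and drop the keys pgAdmin adds on export.
normalize_dump() {
  jq -S '.Servers |= with_entries(
           .value |= (.SSLMode = .ConnectionParameters.sslmode
                      | del(.TunnelPort, .KerberosAuthentication, .ConnectionParameters)))'
}

# Run inside the pgAdmin pod (or on locally copied files): an empty diff means
# the dump matches the mounted config, key order aside.
diff <(normalize_dump < /tmp/dumped.json) \
     <(jq -S . /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json)
```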
-See -- https://github.com/pgadmin-org/pgadmin4/commit/5e0daccf7655384db076512247733d7e73025d1b -- https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/pgadmin/utils/driver/psycopg3/server_manager.py#L94 diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/00-pgadmin-check.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/00-pgadmin-check.yaml deleted file mode 100644 index a9fe716e2e..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/00-pgadmin-check.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -data: - pgadmin-settings.json: | - { - "DEFAULT_SERVER": "0.0.0.0", - "SERVER_MODE": true, - "UPGRADE_CHECK_ENABLED": false, - "UPGRADE_CHECK_KEY": "", - "UPGRADE_CHECK_URL": "" - } - pgadmin-shared-clusters.json: | - { - "Servers": {} - } ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/data: pgadmin - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -status: - containerStatuses: - - name: pgadmin - ready: true - started: true - phase: Running ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -type: Opaque diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/00-pgadmin.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/00-pgadmin.yaml deleted file mode 100644 index 692c0cd06d..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/00-pgadmin.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin -spec: - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - serverGroups: [] diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/02-cluster-check.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/02-cluster-check.yaml deleted file mode 100644 index 16fa079176..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/02-cluster-check.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: pgadmin1 - labels: - hello: world diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/02-cluster.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/02-cluster.yaml deleted file mode 100644 index c1280caa01..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/02-cluster.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: pgadmin1 - labels: - hello: world -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/02-pgadmin.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/02-pgadmin.yaml deleted file mode 100644 index 7ad3b0c4d3..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/02-pgadmin.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: 
postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin -spec: - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - serverGroups: - - name: groupOne - postgresClusterSelector: - matchLabels: - hello: world diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/04-cluster-check.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/04-cluster-check.yaml deleted file mode 100644 index b3de0cfc54..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/04-cluster-check.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: pgadmin2 - labels: - hello: world diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/04-cluster.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/04-cluster.yaml deleted file mode 100644 index 63a44812e1..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/04-cluster.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: pgadmin2 - labels: - hello: world -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/06-cluster-check.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/06-cluster-check.yaml deleted file mode 100644 index 31de80c896..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/06-cluster-check.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: pgadmin3 - labels: - hello: world2 diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/06-cluster.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/06-cluster.yaml deleted file mode 100644 index 40f60cf229..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/06-cluster.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: pgadmin3 - labels: - hello: world2 -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/06-pgadmin.yaml b/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/06-pgadmin.yaml deleted file mode 100644 index 5951c16270..0000000000 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/06-pgadmin.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin -spec: - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - serverGroups: - - name: groupOne - postgresClusterSelector: - matchLabels: - hello: world - - name: groupTwo - postgresClusterSelector: - matchLabels: - hello: world2 diff --git a/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml 
b/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml index 5c867a7892..a5fe982b1a 100644 --- a/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/00-cluster-created.yaml @@ -3,19 +3,6 @@ kind: PostgresCluster metadata: name: cluster-pause status: - conditions: - - message: pgBackRest dedicated repository host is ready - reason: RepoHostReady - status: "True" - type: PGBackRestRepoHostReady - - message: pgBackRest replica create repo is ready for backups - reason: StanzaCreated - status: "True" - type: PGBackRestReplicaRepoReady - - message: pgBackRest replica creation is now possible - reason: RepoBackupComplete - status: "True" - type: PGBackRestReplicaCreate instances: - name: instance1 readyReplicas: 1 diff --git a/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml b/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml index abf7b9f4f2..9f687a1dfa 100644 --- a/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/00-create-cluster.yaml @@ -12,14 +12,3 @@ spec: resources: requests: storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml b/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml index ecd459d3e1..6776fc542b 100644 --- a/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/01-cluster-paused.yaml @@ -4,18 +4,6 @@ metadata: name: cluster-pause status: conditions: - - message: pgBackRest dedicated repository host is ready - reason: RepoHostReady - status: "True" - type: PGBackRestRepoHostReady - - message: pgBackRest replica create repo is ready for backups - reason: StanzaCreated - status: "True" - type: PGBackRestReplicaRepoReady - - message: pgBackRest replica creation is now possible - reason: RepoBackupComplete - status: "True" - type: PGBackRestReplicaCreate - message: No spec changes will be applied and no other statuses will be updated. 
reason: Paused status: "False" diff --git a/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml b/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml index 1c90fe5f22..82062fb908 100644 --- a/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml +++ b/testing/kuttl/e2e/cluster-pause/files/02-cluster-resumed.yaml @@ -3,19 +3,6 @@ kind: PostgresCluster metadata: name: cluster-pause status: - conditions: - - message: pgBackRest dedicated repository host is ready - reason: RepoHostReady - status: "True" - type: PGBackRestRepoHostReady - - message: pgBackRest replica create repo is ready for backups - reason: StanzaCreated - status: "True" - type: PGBackRestReplicaRepoReady - - message: pgBackRest replica creation is now possible - reason: RepoBackupComplete - status: "True" - type: PGBackRestReplicaCreate instances: - name: instance1 readyReplicas: 1 diff --git a/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml b/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml index ecc6ab7fe8..4eebece89e 100644 --- a/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml +++ b/testing/kuttl/e2e/cluster-start/files/00-cluster-created.yaml @@ -9,15 +9,6 @@ status: replicas: 1 updatedReplicas: 1 --- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: cluster-start - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 ---- apiVersion: v1 kind: Service metadata: diff --git a/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml b/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml index a870d940f1..713cd14eb3 100644 --- a/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml +++ b/testing/kuttl/e2e/cluster-start/files/00-create-cluster.yaml @@ -12,14 +12,3 @@ spec: resources: requests: storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/delete-namespace/00-assert.yaml b/testing/kuttl/e2e/delete-namespace/00-assert.yaml new file mode 100644 index 0000000000..78aea811c3 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n ${KUTTL_TEST_DELETE_NAMESPACE} describe pods --selector postgres-operator.crunchydata.com/cluster=delete-namespace +- namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + selector: postgres-operator.crunchydata.com/cluster=delete-namespace diff --git a/testing/kuttl/e2e/delete-namespace/00-create-cluster.yaml b/testing/kuttl/e2e/delete-namespace/00-create-cluster.yaml new file mode 100644 index 0000000000..2245df00c8 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/00-create-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-namespace.yaml +- files/00-create-cluster.yaml +assert: +- files/00-created.yaml diff --git a/testing/kuttl/e2e/delete-namespace/01-assert.yaml b/testing/kuttl/e2e/delete-namespace/01-assert.yaml index 3d2c7ec936..78aea811c3 100644 --- a/testing/kuttl/e2e/delete-namespace/01-assert.yaml +++ b/testing/kuttl/e2e/delete-namespace/01-assert.yaml @@ -1,22 +1,7 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete-namespace - namespace: ${KUTTL_TEST_DELETE_NAMESPACE} -status: - instances: - - name: instance1 - readyReplicas: 1 - 
replicas: 1 - updatedReplicas: 1 ---- -apiVersion: batch/v1 -kind: Job -metadata: - namespace: ${KUTTL_TEST_DELETE_NAMESPACE} - labels: - postgres-operator.crunchydata.com/cluster: delete-namespace - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n ${KUTTL_TEST_DELETE_NAMESPACE} describe pods --selector postgres-operator.crunchydata.com/cluster=delete-namespace +- namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + selector: postgres-operator.crunchydata.com/cluster=delete-namespace diff --git a/testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml b/testing/kuttl/e2e/delete-namespace/01-delete-namespace.yaml similarity index 84% rename from testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml rename to testing/kuttl/e2e/delete-namespace/01-delete-namespace.yaml index 8987d233f1..8fed721e5e 100644 --- a/testing/kuttl/e2e/delete-namespace/02--delete-namespace.yaml +++ b/testing/kuttl/e2e/delete-namespace/01-delete-namespace.yaml @@ -6,3 +6,5 @@ delete: - apiVersion: v1 kind: Namespace name: ${KUTTL_TEST_DELETE_NAMESPACE} +error: +- files/01-errors.yaml diff --git a/testing/kuttl/e2e/delete-namespace/01--cluster.yaml b/testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/delete-namespace/01--cluster.yaml rename to testing/kuttl/e2e/delete-namespace/files/00-create-cluster.yaml diff --git a/testing/kuttl/e2e/delete-namespace/00--namespace.yaml b/testing/kuttl/e2e/delete-namespace/files/00-create-namespace.yaml similarity index 100% rename from testing/kuttl/e2e/delete-namespace/00--namespace.yaml rename to testing/kuttl/e2e/delete-namespace/files/00-create-namespace.yaml diff --git a/testing/kuttl/e2e/delete-namespace/files/00-created.yaml b/testing/kuttl/e2e/delete-namespace/files/00-created.yaml new file mode 100644 index 0000000000..3d2c7ec936 --- /dev/null +++ b/testing/kuttl/e2e/delete-namespace/files/00-created.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-namespace + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + namespace: ${KUTTL_TEST_DELETE_NAMESPACE} + labels: + postgres-operator.crunchydata.com/cluster: delete-namespace + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete-namespace/02-errors.yaml b/testing/kuttl/e2e/delete-namespace/files/01-errors.yaml similarity index 100% rename from testing/kuttl/e2e/delete-namespace/02-errors.yaml rename to testing/kuttl/e2e/delete-namespace/files/01-errors.yaml diff --git a/testing/kuttl/e2e/delete/00-assert.yaml b/testing/kuttl/e2e/delete/00-assert.yaml index 6130475c07..e4d88b3031 100644 --- a/testing/kuttl/e2e/delete/00-assert.yaml +++ b/testing/kuttl/e2e/delete/00-assert.yaml @@ -1,20 +1,7 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 +apiVersion: kuttl.dev/v1beta1 
+kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=delete +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=delete diff --git a/testing/kuttl/e2e/delete/00-create-cluster.yaml b/testing/kuttl/e2e/delete/00-create-cluster.yaml new file mode 100644 index 0000000000..801a22d460 --- /dev/null +++ b/testing/kuttl/e2e/delete/00-create-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/00-create-cluster.yaml +assert: +- files/00-cluster-created.yaml diff --git a/testing/kuttl/e2e/delete/01--delete-cluster.yaml b/testing/kuttl/e2e/delete/01-delete-cluster.yaml similarity index 79% rename from testing/kuttl/e2e/delete/01--delete-cluster.yaml rename to testing/kuttl/e2e/delete/01-delete-cluster.yaml index ccb36f0166..a1f26b39c4 100644 --- a/testing/kuttl/e2e/delete/01--delete-cluster.yaml +++ b/testing/kuttl/e2e/delete/01-delete-cluster.yaml @@ -1,8 +1,8 @@ ---- -# Remove the cluster. apiVersion: kuttl.dev/v1beta1 kind: TestStep delete: - apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster name: delete +error: +- files/01-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/delete/10-assert.yaml b/testing/kuttl/e2e/delete/10-assert.yaml index 1940fc680a..a2c226cc7a 100644 --- a/testing/kuttl/e2e/delete/10-assert.yaml +++ b/testing/kuttl/e2e/delete/10-assert.yaml @@ -1,36 +1,7 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete-with-replica -status: - instances: - - name: instance1 - readyReplicas: 2 - replicas: 2 - updatedReplicas: 2 ---- -# Patroni labels and readiness happen separately. -# The next step expects to find pods by their role label; wait for them here. 
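The rewritten asserts in this test drop the inlined status checks in favor of kuttl collectors, which run the listed command and gather pod logs for the selector whenever an assert fails. Roughly the same diagnostics can be reproduced by hand; the log flags here are an approximation of what kuttl collects, not its exact invocation:

```bash
#!/usr/bin/env bash
# Reproduce, by hand, roughly what the kuttl collectors gather for this test:
# a describe of the cluster's pods plus their container logs.
set -euo pipefail

NAMESPACE="${NAMESPACE:?set to the kuttl test namespace}"
selector=postgres-operator.crunchydata.com/cluster=delete

kubectl -n "${NAMESPACE}" describe pods --selector "${selector}"
kubectl -n "${NAMESPACE}" logs --selector "${selector}" --all-containers --prefix
```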
-apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-with-replica - postgres-operator.crunchydata.com/role: master ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-with-replica - postgres-operator.crunchydata.com/role: replica ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-with-replica - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=delete-with-replica +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/cluster=delete-with-replica diff --git a/testing/kuttl/e2e/delete/10-create-cluster-with-replicas.yaml b/testing/kuttl/e2e/delete/10-create-cluster-with-replicas.yaml new file mode 100644 index 0000000000..678a09c710 --- /dev/null +++ b/testing/kuttl/e2e/delete/10-create-cluster-with-replicas.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/10-create-cluster-with-replicas.yaml +assert: +- files/10-cluster-with-replicas-created.yaml diff --git a/testing/kuttl/e2e/delete/11-delete-cluster.yaml b/testing/kuttl/e2e/delete/11-delete-cluster-with-replicas.yaml similarity index 78% rename from testing/kuttl/e2e/delete/11-delete-cluster.yaml rename to testing/kuttl/e2e/delete/11-delete-cluster-with-replicas.yaml index 991d8d1c44..b2f04ea7ed 100644 --- a/testing/kuttl/e2e/delete/11-delete-cluster.yaml +++ b/testing/kuttl/e2e/delete/11-delete-cluster-with-replicas.yaml @@ -6,3 +6,5 @@ delete: - apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster name: delete-with-replica +error: +- files/11-cluster-with-replicas-deleted.yaml diff --git a/testing/kuttl/e2e/delete/20-assert.yaml b/testing/kuttl/e2e/delete/20-assert.yaml new file mode 100644 index 0000000000..d85d96101f --- /dev/null +++ b/testing/kuttl/e2e/delete/20-assert.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=delete-not-running +# This shouldn't be running, so skip logs; if there's an error, we'll be able to see it in the describe diff --git a/testing/kuttl/e2e/delete/20-create-broken-cluster.yaml b/testing/kuttl/e2e/delete/20-create-broken-cluster.yaml new file mode 100644 index 0000000000..9db684036e --- /dev/null +++ b/testing/kuttl/e2e/delete/20-create-broken-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +apply: +- files/20-create-broken-cluster.yaml +error: +- files/20-broken-cluster-not-created.yaml diff --git a/testing/kuttl/e2e/delete/21--delete-cluster.yaml b/testing/kuttl/e2e/delete/21-delete-broken-cluster.yaml similarity index 80% rename from testing/kuttl/e2e/delete/21--delete-cluster.yaml rename to testing/kuttl/e2e/delete/21-delete-broken-cluster.yaml index b585401167..3e159f17d4 100644 --- a/testing/kuttl/e2e/delete/21--delete-cluster.yaml +++ b/testing/kuttl/e2e/delete/21-delete-broken-cluster.yaml @@ -6,3 +6,5 @@ delete: - apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster name: delete-not-running +error: +- files/21-broken-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/delete/README.md b/testing/kuttl/e2e/delete/README.md index 3a7d4fd848..7e99680162 
100644 --- a/testing/kuttl/e2e/delete/README.md +++ b/testing/kuttl/e2e/delete/README.md @@ -1,18 +1,18 @@ ### Delete test -#### Regular cluster delete +#### Regular cluster delete (00-01) * Start a regular cluster * Delete it * Check that nothing remains. -#### Delete cluster with replica +#### Delete cluster with replica (10-11) * Start a regular cluster with 2 replicas * Delete it * Check that nothing remains -#### Delete a cluster that never started +#### Delete a cluster that never started (20-21) * Start a cluster with a bad image * Delete it diff --git a/testing/kuttl/e2e/delete/files/00-cluster-created.yaml b/testing/kuttl/e2e/delete/files/00-cluster-created.yaml new file mode 100644 index 0000000000..6130475c07 --- /dev/null +++ b/testing/kuttl/e2e/delete/files/00-cluster-created.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete +status: + instances: + - name: instance1 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete/00--cluster.yaml b/testing/kuttl/e2e/delete/files/00-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/delete/00--cluster.yaml rename to testing/kuttl/e2e/delete/files/00-create-cluster.yaml diff --git a/testing/kuttl/e2e/delete/02-errors.yaml b/testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml similarity index 100% rename from testing/kuttl/e2e/delete/02-errors.yaml rename to testing/kuttl/e2e/delete/files/01-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml b/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml new file mode 100644 index 0000000000..1940fc680a --- /dev/null +++ b/testing/kuttl/e2e/delete/files/10-cluster-with-replicas-created.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: delete-with-replica +status: + instances: + - name: instance1 + readyReplicas: 2 + replicas: 2 + updatedReplicas: 2 +--- +# Patroni labels and readiness happen separately. +# The next step expects to find pods by their role label; wait for them here. 
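Outside kuttl, that wait amounts to polling until Patroni has stamped the role labels and both pods report Ready. A sketch under the assumption that the labels match the Pod entries below (cluster `delete-with-replica`, roles `master` and `replica`); the timeout is an arbitrary choice:

```bash
#!/usr/bin/env bash
# Wait until Patroni has labeled a master and a replica pod, then wait for both
# to become Ready -- the same condition the assert file below encodes.
set -euo pipefail

NAMESPACE="${NAMESPACE:?set to the kuttl test namespace}"
cluster=delete-with-replica

for role in master replica; do
  sel="postgres-operator.crunchydata.com/cluster=${cluster},postgres-operator.crunchydata.com/role=${role}"
  until [ -n "$(kubectl get pod -n "${NAMESPACE}" -l "${sel}" -o name)" ]; do
    sleep 2    # the role label is applied by Patroni after the pod starts
  done
  kubectl wait -n "${NAMESPACE}" pod -l "${sel}" --for=condition=Ready --timeout=120s
done
```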
+apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/role: master +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/role: replica +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: delete-with-replica + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/delete/10--cluster.yaml b/testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml similarity index 100% rename from testing/kuttl/e2e/delete/10--cluster.yaml rename to testing/kuttl/e2e/delete/files/10-create-cluster-with-replicas.yaml diff --git a/testing/kuttl/e2e/delete/12-errors.yaml b/testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml similarity index 100% rename from testing/kuttl/e2e/delete/12-errors.yaml rename to testing/kuttl/e2e/delete/files/11-cluster-with-replicas-deleted.yaml diff --git a/testing/kuttl/e2e/delete/20-errors.yaml b/testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml similarity index 100% rename from testing/kuttl/e2e/delete/20-errors.yaml rename to testing/kuttl/e2e/delete/files/20-broken-cluster-not-created.yaml diff --git a/testing/kuttl/e2e/delete/20--cluster.yaml b/testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/delete/20--cluster.yaml rename to testing/kuttl/e2e/delete/files/20-create-broken-cluster.yaml diff --git a/testing/kuttl/e2e/delete/22-errors.yaml b/testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml similarity index 100% rename from testing/kuttl/e2e/delete/22-errors.yaml rename to testing/kuttl/e2e/delete/files/21-broken-cluster-deleted.yaml diff --git a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml index 6ff8ed5e67..5356b83be9 100644 --- a/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml +++ b/testing/kuttl/e2e/exporter-custom-queries/files/exporter-custom-queries-cluster.yaml @@ -7,12 +7,6 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } monitoring: pgmonitor: exporter: diff --git a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml index 9cc6ec4877..690d5b505d 100644 --- a/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml +++ b/testing/kuttl/e2e/exporter-no-tls/files/exporter-no-tls-cluster.yaml @@ -7,12 +7,6 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } monitoring: pgmonitor: exporter: {} diff --git a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml index e3fbb7b94a..d16c898ac2 100644 --- 
a/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml +++ b/testing/kuttl/e2e/exporter-password-change/files/initial-postgrescluster.yaml @@ -7,12 +7,6 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } monitoring: pgmonitor: exporter: {} diff --git a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml index d445062bf3..4fa420664a 100644 --- a/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml +++ b/testing/kuttl/e2e/exporter-tls/files/exporter-tls-cluster.yaml @@ -7,12 +7,6 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } monitoring: pgmonitor: exporter: diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml index fa3985231d..741efead41 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml @@ -6,6 +6,6 @@ metadata: name: empty-image-upgrade spec: # postgres version that is no longer available - fromPostgresVersion: 10 + fromPostgresVersion: 11 toPostgresVersion: ${KUTTL_PG_UPGRADE_TO_VERSION} postgresClusterName: major-upgrade-empty-image diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml index c85a9b8dae..f5ef8c029e 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml +++ b/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml @@ -7,7 +7,7 @@ metadata: name: major-upgrade-empty-image spec: # postgres version that is no longer available - postgresVersion: 10 + postgresVersion: 11 patroni: dynamicConfiguration: postgresql: diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/README.md b/testing/kuttl/e2e/major-upgrade-missing-image/README.md index 341cc854f7..1053da29ed 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/README.md +++ b/testing/kuttl/e2e/major-upgrade-missing-image/README.md @@ -11,7 +11,7 @@ PostgresCluster spec or via the RELATED_IMAGES environment variables. ### Verify new statuses for missing required container images -* 10--cluster: create the cluster with an unavailable image (i.e. Postgres 10) +* 10--cluster: create the cluster with an unavailable image (i.e. 
Postgres 11) * 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" * 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade * 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" diff --git a/testing/kuttl/e2e/optional-backups/00--cluster.yaml b/testing/kuttl/e2e/optional-backups/00--cluster.yaml new file mode 100644 index 0000000000..7b927831e0 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/00--cluster.yaml @@ -0,0 +1,15 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + diff --git a/testing/kuttl/e2e/optional-backups/00-assert.yaml b/testing/kuttl/e2e/optional-backups/00-assert.yaml new file mode 100644 index 0000000000..86392d0308 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/00-assert.yaml @@ -0,0 +1,38 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + instances: + - name: instance1 + pgbackrest: {} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: master +status: + containerStatuses: + - ready: true + - ready: true diff --git a/testing/kuttl/e2e/optional-backups/01-errors.yaml b/testing/kuttl/e2e/optional-backups/01-errors.yaml new file mode 100644 index 0000000000..e702fcddb4 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/01-errors.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest diff --git a/testing/kuttl/e2e/optional-backups/02-assert.yaml b/testing/kuttl/e2e/optional-backups/02-assert.yaml new file mode 100644 index 0000000000..eb3f70357f --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/02-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- 
psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'true', + format('expected "true", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/03-assert.yaml b/testing/kuttl/e2e/optional-backups/03-assert.yaml new file mode 100644 index 0000000000..17ca1e4062 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/03-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 \ + -c "CREATE TABLE important (data) AS VALUES ('treasure');" + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 \ + -c "CHECKPOINT;" diff --git a/testing/kuttl/e2e/optional-backups/04--cluster.yaml b/testing/kuttl/e2e/optional-backups/04--cluster.yaml new file mode 100644 index 0000000000..fc39ff6ebe --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/04--cluster.yaml @@ -0,0 +1,16 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + diff --git a/testing/kuttl/e2e/optional-backups/05-assert.yaml b/testing/kuttl/e2e/optional-backups/05-assert.yaml new file mode 100644 index 0000000000..d346e01a04 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/05-assert.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: replica +status: + containerStatuses: + - ready: true + - ready: true diff --git a/testing/kuttl/e2e/optional-backups/06-assert.yaml b/testing/kuttl/e2e/optional-backups/06-assert.yaml new file mode 100644 index 0000000000..c366545508 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/06-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups \ + -l postgres-operator.crunchydata.com/role=replica) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + DECLARE + everything jsonb; + BEGIN + SELECT jsonb_agg(important) INTO everything FROM important; + ASSERT everything = '[{"data":"treasure"}]', format('got %L', everything); + END $$ + SQL diff --git a/installers/olm/config/examples/postgrescluster.example.yaml b/testing/kuttl/e2e/optional-backups/10--cluster.yaml similarity index 63% rename from installers/olm/config/examples/postgrescluster.example.yaml rename to testing/kuttl/e2e/optional-backups/10--cluster.yaml index 502eaff437..6da85c93f9 100644 --- a/installers/olm/config/examples/postgrescluster.example.yaml +++ b/testing/kuttl/e2e/optional-backups/10--cluster.yaml @@ -1,13 +1,15 @@ apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster metadata: - name: example + name: created-without-backups 
spec: - postgresVersion: 15 + postgresVersion: ${KUTTL_PG_VERSION} instances: - - replicas: 1 + - name: instance1 + replicas: 1 dataVolumeClaimSpec: - accessModes: [ReadWriteOnce] + accessModes: + - "ReadWriteOnce" resources: requests: storage: 1Gi @@ -17,7 +19,9 @@ spec: - name: repo1 volume: volumeClaimSpec: - accessModes: ["ReadWriteOnce"] + accessModes: + - "ReadWriteOnce" resources: requests: storage: 1Gi + diff --git a/testing/kuttl/e2e/optional-backups/10-assert.yaml b/testing/kuttl/e2e/optional-backups/10-assert.yaml new file mode 100644 index 0000000000..7b740b310d --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/10-assert.yaml @@ -0,0 +1,79 @@ +# It should be possible to turn backups back on. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/patroni: created-without-backups-ha + postgres-operator.crunchydata.com/role: master +status: + containerStatuses: + - ready: true + - ready: true + - ready: true + - ready: true diff --git a/testing/kuttl/e2e/optional-backups/11-assert.yaml b/testing/kuttl/e2e/optional-backups/11-assert.yaml new file mode 100644 index 0000000000..5976d03f41 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/11-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backup \ + -l postgres-operator.crunchydata.com/instance-set=instance1 \ + -l postgres-operator.crunchydata.com/patroni=created-without-backups-ha \ + -l postgres-operator.crunchydata.com/role=master) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'pgbackrest --stanza=db archive-push "%p"', + format('expected 
"pgbackrest --stanza=db archive-push \"%p\"", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/20--cluster.yaml b/testing/kuttl/e2e/optional-backups/20--cluster.yaml new file mode 100644 index 0000000000..8e0d01cbf8 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/20--cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- command: |- + kubectl patch postgrescluster created-without-backups --type 'merge' -p '{"spec":{"backups": null}}' + namespaced: true diff --git a/testing/kuttl/e2e/optional-backups/20-assert.yaml b/testing/kuttl/e2e/optional-backups/20-assert.yaml new file mode 100644 index 0000000000..b469e277f8 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/20-assert.yaml @@ -0,0 +1,63 @@ +# It should be possible to turn backups back on. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest diff --git a/testing/kuttl/e2e/optional-backups/21-assert.yaml b/testing/kuttl/e2e/optional-backups/21-assert.yaml new file mode 100644 index 0000000000..5976d03f41 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/21-assert.yaml @@ -0,0 +1,18 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backup \ + -l postgres-operator.crunchydata.com/instance-set=instance1 \ + -l postgres-operator.crunchydata.com/patroni=created-without-backups-ha \ + -l postgres-operator.crunchydata.com/role=master) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'pgbackrest --stanza=db archive-push "%p"', + format('expected "pgbackrest --stanza=db archive-push \"%p\"", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/22--cluster.yaml b/testing/kuttl/e2e/optional-backups/22--cluster.yaml new file mode 100644 index 0000000000..2e25309886 --- /dev/null +++ 
b/testing/kuttl/e2e/optional-backups/22--cluster.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- command: kubectl annotate postgrescluster created-without-backups postgres-operator.crunchydata.com/authorizeBackupRemoval="true" + namespaced: true diff --git a/testing/kuttl/e2e/optional-backups/23-assert.yaml b/testing/kuttl/e2e/optional-backups/23-assert.yaml new file mode 100644 index 0000000000..8748ea015c --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/23-assert.yaml @@ -0,0 +1,26 @@ +# With the authorizeBackupRemoval annotation in place, the pgBackRest objects should be removed. +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: created-without-backups +status: + instances: + - name: instance1 + pgbackrest: {} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 + postgres-operator.crunchydata.com/role: pgdata +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + postgres-operator.crunchydata.com/cluster: created-without-backups + postgres-operator.crunchydata.com/data: postgres + postgres-operator.crunchydata.com/instance-set: instance1 diff --git a/testing/kuttl/e2e/optional-backups/24-errors.yaml b/testing/kuttl/e2e/optional-backups/24-errors.yaml new file mode 100644 index 0000000000..e702fcddb4 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/24-errors.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: created-without-backups-repo1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: created-without-backups-repo-host +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: created-without-backups-pgbackrest-config +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: created-without-backups-pgbackrest +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: created-without-backups-pgbackrest diff --git a/testing/kuttl/e2e/optional-backups/25-assert.yaml b/testing/kuttl/e2e/optional-backups/25-assert.yaml new file mode 100644 index 0000000000..eb3f70357f --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/25-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +- script: | + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=created-without-backups) + + kubectl exec --stdin "${pod}" --namespace "${NAMESPACE}" -c database \ + -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' + DO $$ + BEGIN + ASSERT current_setting('archive_command') LIKE 'true', + format('expected "true", got %L', current_setting('archive_command')); + END $$ + SQL diff --git a/testing/kuttl/e2e/optional-backups/README.md b/testing/kuttl/e2e/optional-backups/README.md new file mode 100644 index 0000000000..92c52d4136 --- /dev/null +++ b/testing/kuttl/e2e/optional-backups/README.md @@ -0,0 +1,13 @@ +## Optional backups + +### Steps + +00-02. Create cluster without backups, check that expected K8s objects do/don't exist, e.g., repo-host sts doesn't exist; check that the archive command is `true` + +03-06. Add data and a replica; check that the data successfully replicates to the replica. + +10-11. 
Update cluster to add backups, check that expected K8s objects do/don't exist, e.g., repo-host sts exists; check that the archive command is set to the usual pgBackRest archive-push command + +20-21. Update cluster to remove backups but without the authorizing annotation; check that no changes were made, including to the archive command + +22-25. Annotate cluster to remove existing backups, check that expected K8s objects do/don't exist, e.g., repo-host sts doesn't exist; check that the archive command is `true` diff --git a/testing/kuttl/e2e/password-change/00--cluster.yaml b/testing/kuttl/e2e/password-change/00--cluster.yaml index 2777286880..d7b7019b62 100644 --- a/testing/kuttl/e2e/password-change/00--cluster.yaml +++ b/testing/kuttl/e2e/password-change/00--cluster.yaml @@ -12,14 +12,3 @@ spec: resources: requests: storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/pgadmin/01--cluster.yaml b/testing/kuttl/e2e/pgadmin/01--cluster.yaml index 2cc932c463..d1afb7be04 100644 --- a/testing/kuttl/e2e/pgadmin/01--cluster.yaml +++ b/testing/kuttl/e2e/pgadmin/01--cluster.yaml @@ -25,12 +25,6 @@ spec: - name: instance1 replicas: 1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } userInterface: pgAdmin: dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml new file mode 100644 index 0000000000..9665fac665 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml @@ -0,0 +1,28 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + global: + backup-standby: "y" + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml new file mode 100644 index 0000000000..d69a3c68b5 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/00-assert.yaml @@ -0,0 +1,23 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + postgres-operator.crunchydata.com/cluster: pgbackrest-backup-standby + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + phase: Failed diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml new file mode 100644 index 0000000000..72d2050d4a --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml @@ -0,0 +1,20 @@ +apiVersion: kuttl.dev/v1beta1
+kind: TestStep +commands: +# First, find at least one backup job pod. +# Then, check the logs for the 'unable to find standby cluster' line. +# If this line isn't found, exit 1. +- script: | + retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ + -l postgres-operator.crunchydata.com/cluster=pgbackrest-backup-standby \ + -l postgres-operator.crunchydata.com/pgbackrest-backup=replica-create) + [ "$pod" = "" ] && retry "Pod not found" && exit 1 + + logs=$(kubectl logs "${pod}" --namespace "${NAMESPACE}") + { contains "${logs}" 'unable to find standby cluster - cannot proceed'; } || { + echo 'did not find expected standby cluster error ' + exit 1 + } diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml new file mode 100644 index 0000000000..c986f4a9de --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml @@ -0,0 +1,28 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +spec: + postgresVersion: ${KUTTL_PG_VERSION} + instances: + - name: instance1 + replicas: 2 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + global: + backup-standby: "y" + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml new file mode 100644 index 0000000000..92f7b12f5a --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/02-assert.yaml @@ -0,0 +1,25 @@ +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: pgbackrest-backup-standby +status: + pgbackrest: + repoHost: + apiVersion: apps/v1 + kind: StatefulSet + ready: true + repos: + - bound: true + name: repo1 + replicaCreateBackupComplete: true + stanzaCreated: true +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + postgres-operator.crunchydata.com/cluster: pgbackrest-backup-standby + postgres-operator.crunchydata.com/pgbackrest-backup: replica-create + postgres-operator.crunchydata.com/pgbackrest-repo: repo1 +status: + succeeded: 1 diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/README.md b/testing/kuttl/e2e/pgbackrest-backup-standby/README.md new file mode 100644 index 0000000000..39fb8707a8 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-backup-standby/README.md @@ -0,0 +1,5 @@ +### pgBackRest backup-standby test + +* 00: Create a cluster with 'backup-standby' set to 'y' but with only one replica. +* 01: Check the backup Job Pod logs for the expected error. +* 02: Update the cluster to have 2 replicas and verify that the cluster can initialize successfully and the backup job can complete. 
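For reference, the log check from step 01 above can also be run by hand against a live test namespace. The following is a minimal sketch, not part of the test suite: it assumes the kuttl-assigned namespace is exported as NAMESPACE and that the backup Job Pod carries the labels used in the step above; the grep-based check is a simplification of the contains helper.

# Manual version of the step-01 check: the initial backup is expected to fail
# while the cluster has only one replica and backup-standby is "y".
pod=$(kubectl get pods -o name -n "${NAMESPACE}" \
  -l postgres-operator.crunchydata.com/cluster=pgbackrest-backup-standby \
  -l postgres-operator.crunchydata.com/pgbackrest-backup=replica-create)

# Succeeds only if the expected pgBackRest error appears in the Job Pod logs.
kubectl logs "${pod}" --namespace "${NAMESPACE}" \
  | grep -F 'unable to find standby cluster - cannot proceed'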
diff --git a/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml b/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml new file mode 100644 index 0000000000..e32cc2fc87 --- /dev/null +++ b/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml @@ -0,0 +1,17 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/role=master' + ) + + LIST=$( + kubectl exec --namespace "${NAMESPACE}" -c database "${PRIMARY}" -- \ + ls -l /pgdata + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + contains "$LIST" "pgbackrest-spool" || exit 1 diff --git a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml index c83bfea9d3..4699d90171 100644 --- a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml +++ b/testing/kuttl/e2e/pgbouncer/00--cluster.yaml @@ -7,14 +7,8 @@ spec: postgresVersion: ${KUTTL_PG_VERSION} instances: - name: instance1 - replicas: 2 + replicas: 1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } proxy: pgBouncer: replicas: 1 diff --git a/testing/kuttl/e2e/pgbouncer/00-assert.yaml b/testing/kuttl/e2e/pgbouncer/00-assert.yaml index afe492faa0..6c3a33079f 100644 --- a/testing/kuttl/e2e/pgbouncer/00-assert.yaml +++ b/testing/kuttl/e2e/pgbouncer/00-assert.yaml @@ -5,9 +5,9 @@ metadata: status: instances: - name: instance1 - readyReplicas: 2 - replicas: 2 - updatedReplicas: 2 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 --- apiVersion: v1 kind: Service diff --git a/testing/kuttl/e2e/replica-read/00--cluster.yaml b/testing/kuttl/e2e/replica-read/00--cluster.yaml index a79666f4e1..c62f5418cd 100644 --- a/testing/kuttl/e2e/replica-read/00--cluster.yaml +++ b/testing/kuttl/e2e/replica-read/00--cluster.yaml @@ -13,14 +13,3 @@ spec: requests: storage: 1Gi replicas: 2 - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml b/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml index 461ae7ccba..2d23e1e3d3 100644 --- a/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml +++ b/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml @@ -9,12 +9,6 @@ spec: - name: instance1 replicas: 1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } --- apiVersion: postgres-operator.crunchydata.com/v1beta1 kind: PostgresCluster @@ -27,9 +21,3 @@ spec: - name: instance1 replicas: 1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/00--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/00--create-cluster.yaml rename to 
testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/01--user-schema.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/01--user-schema.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/02--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/02--create-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/03-assert.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/04--update-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/04--update-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/05-assert.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/README.md b/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/README.md rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/00-cluster-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/00-cluster-check.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/00-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml similarity index 63% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/00-cluster.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml index a3b349844a..5f8678e5e9 100644 --- a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/00-cluster.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml @@ -9,9 +9,3 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml diff --git 
a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/02-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/02-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/04-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-db-uri/files/04-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/00--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/00--create-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml similarity index 56% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/01-assert.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml index f1ad587c3e..244533b7ee 100644 --- a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/01-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml @@ -8,19 +8,19 @@ commands: users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - bob_role=$(jq '.[] | select(.username=="bob@example.com") | .role' <<< $users_in_pgadmin) - dave_role=$(jq '.[] | select(.username=="dave@example.com") | .role' <<< $users_in_pgadmin) + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') [ $bob_role = 1 ] && [ $dave_role = 2 ] || exit 1 users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) - bob_is_admin=$(jq '.[] | select(.username=="bob@example.com") | .isAdmin' <<< $users_in_secret) - dave_is_admin=$(jq '.[] | select(.username=="dave@example.com") | .isAdmin' <<< $users_in_secret) + bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') + dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') $bob_is_admin && ! 
$dave_is_admin || exit 1 - bob_password=$(jq -r '.[] | select(.username=="bob@example.com") | .password' <<< $users_in_secret) - dave_password=$(jq -r '.[] | select(.username=="dave@example.com") | .password' <<< $users_in_secret) + bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') + dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') [ "$bob_password" = "password123" ] && [ "$dave_password" = "password456" ] || exit 1 diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml new file mode 100644 index 0000000000..01aff25b3b --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml @@ -0,0 +1,29 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User +- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) + + bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') + dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') + jimi_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="jimi@example.com") | .isAdmin') + + $bob_is_admin && $dave_is_admin && ! 
$jimi_is_admin || exit 1 + + bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') + dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') + jimi_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="jimi@example.com") | .password') + + [ "$bob_password" = "password123" ] && [ "$dave_password" = "password456" ] && [ "$jimi_password" = "password789" ] || exit 1 diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml new file mode 100644 index 0000000000..1dca13a7b7 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml @@ -0,0 +1,29 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: +# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User +- script: | + pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) + + users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") + + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') + + [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 + + users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) + + bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') + dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') + jimi_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="jimi@example.com") | .isAdmin') + + $bob_is_admin && $dave_is_admin && ! 
$jimi_is_admin || exit 1 + + bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') + dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') + jimi_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="jimi@example.com") | .password') + + [ "$bob_password" = "NEWpassword123" ] && [ "$dave_password" = "NEWpassword456" ] && [ "$jimi_password" = "NEWpassword789" ] || exit 1 diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml similarity index 66% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/07-assert.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml index b724e42b85..5c0e7267e6 100644 --- a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/07-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml @@ -8,12 +8,12 @@ commands: users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - bob_role=$(jq '.[] | select(.username=="bob@example.com") | .role' <<< $users_in_pgadmin) - dave_role=$(jq '.[] | select(.username=="dave@example.com") | .role' <<< $users_in_pgadmin) - jimi_role=$(jq '.[] | select(.username=="jimi@example.com") | .role' <<< $users_in_pgadmin) + bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') + dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') + jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) - $(jq '. == []' <<< $users_in_secret) || exit 1 + $(printf '%s\n' $users_in_secret | jq '. 
== []') || exit 1 diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/README.md b/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/README.md rename to testing/kuttl/e2e/standalone-pgadmin-user-management/README.md diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/00-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/00-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/02-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/02-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/04-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/04-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/06-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-user-management/files/06-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin/00-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/00-assert.yaml new file mode 100644 index 
0000000000..5b95b46964 --- /dev/null +++ b/testing/kuttl/e2e/standalone-pgadmin/00-assert.yaml @@ -0,0 +1,7 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +collectors: +- type: command + command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/pgadmin=pgadmin +- namespace: $NAMESPACE + selector: postgres-operator.crunchydata.com/pgadmin=pgadmin diff --git a/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml index 8b75a3e40e..6b7c8c8794 100644 --- a/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml @@ -6,7 +6,7 @@ commands: pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected="\"Servers\": {}" { diff --git a/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml index e9709042a8..169a8261eb 100644 --- a/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml @@ -45,7 +45,7 @@ commands: exit 1 } - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected=' { @@ -58,6 +58,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin1", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" diff --git a/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml index 561cf13593..7fe5b69dc2 100644 --- a/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml @@ -57,7 +57,7 @@ commands: exit 1 } - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected=' { @@ -70,6 +70,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin1", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" @@ -83,6 +84,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin2", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" diff --git 
a/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml index ad75223edd..323237cad4 100644 --- a/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml @@ -67,7 +67,7 @@ commands: exit 1 } - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected=' { @@ -80,6 +80,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin1", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" @@ -93,6 +94,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin2", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" @@ -106,6 +108,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin3", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" diff --git a/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml index be1e124125..eca5581cb7 100644 --- a/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml @@ -57,7 +57,7 @@ commands: exit 1 } - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py --dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") + clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") clusters_expected=' { @@ -70,6 +70,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin1", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" @@ -83,6 +84,7 @@ commands: "MaintenanceDB": "postgres", "Username": "pgadmin3", "Shared": true, + "TunnelPort": "22", "KerberosAuthentication": false, "ConnectionParameters": { "sslmode": "prefer" diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/10-invalid-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-v8/10-invalid-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/11--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/11--create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-v8/11--create-cluster.yaml rename to testing/kuttl/e2e/standalone-pgadmin/11--create-cluster.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/12-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/12-assert.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-v8/12-assert.yaml rename to testing/kuttl/e2e/standalone-pgadmin/12-assert.yaml diff --git 
a/testing/kuttl/e2e/standalone-pgadmin/README.md b/testing/kuttl/e2e/standalone-pgadmin/README.md index 187c6f37af..93d0d45d13 100644 --- a/testing/kuttl/e2e/standalone-pgadmin/README.md +++ b/testing/kuttl/e2e/standalone-pgadmin/README.md @@ -47,3 +47,16 @@ Note: due to the (random) namespace being part of the host, we cannot check the * 09: * check that the configmap is updated in the pgadmin pod; * dump the servers from pgAdmin and check that the list has the expected 2 servers + +pgAdmin v7 vs v8 Notes: +pgAdmin v8 includes updates to `setup.py` which alter how the `dump-servers` argument +is called: +- v7: https://github.com/pgadmin-org/pgadmin4/blob/REL-7_8/web/setup.py#L175 +- v8: https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/setup.py#L79 + +You will also notice a difference in the `assert.yaml` files between the stored +config and the config returned by the `dump-servers` command. The additional setting, +`"TunnelPort": "22"`, is due to the new defaulting behavior added to pgAdmin for psycopg3. +See +- https://github.com/pgadmin-org/pgadmin4/commit/5e0daccf7655384db076512247733d7e73025d1b +- https://github.com/pgadmin-org/pgadmin4/blob/REL-8_5/web/pgadmin/utils/driver/psycopg3/server_manager.py#L94 diff --git a/testing/kuttl/e2e/standalone-pgadmin/files/00-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin/files/00-pgadmin-check.yaml index ebfe77f7a6..5601bd5b6c 100644 --- a/testing/kuttl/e2e/standalone-pgadmin/files/00-pgadmin-check.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/files/00-pgadmin-check.yaml @@ -26,9 +26,25 @@ metadata: postgres-operator.crunchydata.com/data: pgadmin postgres-operator.crunchydata.com/role: pgadmin postgres-operator.crunchydata.com/pgadmin: pgadmin +spec: + containers: + - name: pgadmin + readinessProbe: + httpGet: + path: /login + port: 5050 + scheme: HTTP status: containerStatuses: - name: pgadmin ready: true started: true phase: Running +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + postgres-operator.crunchydata.com/role: pgadmin + postgres-operator.crunchydata.com/pgadmin: pgadmin +type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin/files/02-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/files/02-cluster.yaml index c1280caa01..d37cf895a2 100644 --- a/testing/kuttl/e2e/standalone-pgadmin/files/02-cluster.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/files/02-cluster.yaml @@ -9,9 +9,3 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/standalone-pgadmin/files/04-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/files/04-cluster.yaml index 63a44812e1..6ad5844c4a 100644 --- a/testing/kuttl/e2e/standalone-pgadmin/files/04-cluster.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/files/04-cluster.yaml @@ -9,9 +9,3 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/standalone-pgadmin/files/06-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/files/06-cluster.yaml index 40f60cf229..80e11eb957 100644 --- 
a/testing/kuttl/e2e/standalone-pgadmin/files/06-cluster.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/files/06-cluster.yaml @@ -9,9 +9,3 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/11-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/files/11-cluster.yaml similarity index 61% rename from testing/kuttl/e2e-other/standalone-pgadmin-v8/files/11-cluster.yaml rename to testing/kuttl/e2e/standalone-pgadmin/files/11-cluster.yaml index ec551d6e0f..b11b291d85 100644 --- a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/11-cluster.yaml +++ b/testing/kuttl/e2e/standalone-pgadmin/files/11-cluster.yaml @@ -7,9 +7,3 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/11-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin/files/11-pgadmin-check.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-v8/files/11-pgadmin-check.yaml rename to testing/kuttl/e2e/standalone-pgadmin/files/11-pgadmin-check.yaml diff --git a/testing/kuttl/e2e-other/standalone-pgadmin-v8/files/11-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin/files/11-pgadmin.yaml similarity index 100% rename from testing/kuttl/e2e-other/standalone-pgadmin-v8/files/11-pgadmin.yaml rename to testing/kuttl/e2e/standalone-pgadmin/files/11-pgadmin.yaml diff --git a/testing/kuttl/e2e/streaming-standby/01--primary-cluster.yaml b/testing/kuttl/e2e/streaming-standby/01--primary-cluster.yaml index cd0e05ac15..44d1386b59 100644 --- a/testing/kuttl/e2e/streaming-standby/01--primary-cluster.yaml +++ b/testing/kuttl/e2e/streaming-standby/01--primary-cluster.yaml @@ -11,9 +11,3 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/streaming-standby/03--standby-cluster.yaml b/testing/kuttl/e2e/streaming-standby/03--standby-cluster.yaml index a3c542addb..ebe382041a 100644 --- a/testing/kuttl/e2e/streaming-standby/03--standby-cluster.yaml +++ b/testing/kuttl/e2e/streaming-standby/03--standby-cluster.yaml @@ -14,9 +14,3 @@ spec: instances: - name: instance1 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/switchover/01--cluster.yaml b/testing/kuttl/e2e/switchover/01--cluster.yaml index 4b0d598ff1..4c91dd85ec 100644 --- a/testing/kuttl/e2e/switchover/01--cluster.yaml +++ b/testing/kuttl/e2e/switchover/01--cluster.yaml @@ -12,9 +12,3 @@ spec: instances: - replicas: 2 dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - 
volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/tablespace-enabled/00--cluster.yaml b/testing/kuttl/e2e/tablespace-enabled/00--cluster.yaml index edeebeb8bb..ea69a7264f 100644 --- a/testing/kuttl/e2e/tablespace-enabled/00--cluster.yaml +++ b/testing/kuttl/e2e/tablespace-enabled/00--cluster.yaml @@ -39,14 +39,3 @@ spec: resources: requests: storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/tablespace-enabled/00-assert.yaml b/testing/kuttl/e2e/tablespace-enabled/00-assert.yaml index 9351766c4f..ad436fc892 100644 --- a/testing/kuttl/e2e/tablespace-enabled/00-assert.yaml +++ b/testing/kuttl/e2e/tablespace-enabled/00-assert.yaml @@ -9,15 +9,6 @@ status: replicas: 1 updatedReplicas: 1 --- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: tablespace-enabled - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 ---- apiVersion: v1 kind: Service metadata: diff --git a/testing/kuttl/e2e/wal-pvc-pgupgrade/06--check-spool-path.yaml b/testing/kuttl/e2e/wal-pvc-pgupgrade/06--check-spool-path.yaml new file mode 100644 index 0000000000..4b52bce16e --- /dev/null +++ b/testing/kuttl/e2e/wal-pvc-pgupgrade/06--check-spool-path.yaml @@ -0,0 +1,19 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: +- script: | + PRIMARY=$( + kubectl get pod --namespace "${NAMESPACE}" \ + --output name --selector ' + postgres-operator.crunchydata.com/role=master' + ) + + LIST=$( + kubectl exec --namespace "${NAMESPACE}" -c database "${PRIMARY}" -- \ + ls -l /pgdata + ) + + contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } + + # Confirm that the pgbackrest spool-path has been symlinked to the wal volume. + contains "$LIST" "pgbackrest-spool -> /pgwal/pgbackrest-spool" || exit 1 diff --git a/trivy.yaml b/trivy.yaml new file mode 100644 index 0000000000..b2ef32d785 --- /dev/null +++ b/trivy.yaml @@ -0,0 +1,14 @@ +# https://aquasecurity.github.io/trivy/latest/docs/references/configuration/config-file/ +--- +# Specify an exact list of recognized and acceptable licenses. +# [A GitHub workflow](/.github/workflows/trivy.yaml) rejects pull requests that +# import licenses not in this list. +# +# https://aquasecurity.github.io/trivy/latest/docs/scanner/license/ +license: + ignored: + - Apache-2.0 + - BSD-2-Clause + - BSD-3-Clause + - ISC + - MIT
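The license list above can also be exercised locally before opening a pull request. The following is a minimal sketch under the assumption that a recent Trivy release with the license scanner and these flag names is installed; the workflow referenced in the file's comment remains the authoritative check.

# Scan the working tree for license findings using the trivy.yaml above.
# Licenses under license.ignored are treated as acceptable; anything else
# is reported, and --exit-code 1 makes the command fail in that case.
trivy filesystem --scanners license --config trivy.yaml --exit-code 1 .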